| branch_name (stringclasses, 22 values) | content (stringlengths, 18–81.8M) | directory_id (stringlengths, 40) | languages (sequencelengths, 1–36) | num_files (int64, 1–7.38k) | repo_language (stringclasses, 151 values) | repo_name (stringlengths, 7–101) | revision_id (stringlengths, 40) | snapshot_id (stringlengths, 40) |
---|---|---|---|---|---|---|---|---|
refs/heads/main | <repo_name>hugues0/material-ui-newsfeed<file_sep>/src/components/Post.js
import React from "react";
import {
Button,
Card,
CardActionArea,
CardActions,
CardContent,
CardMedia,
Container,
makeStyles,
Typography,
} from "@material-ui/core";
const useStyles = makeStyles((theme) => ({
media: {
height: 250,
width:"100%",
[theme.breakpoints.down("sm")]: {
height: 150,
},
},
card: {
marginBottom: theme.spacing(3),
[theme.breakpoints.down("sm")]: {
width: 400,
},
},
}));
const Post = () => {
const classes = useStyles();
return (
<Card className={classes.card}>
<CardActionArea>
<CardMedia
className={classes.media}
image="https://pbs.twimg.com/profile_images/1200529452/Bart_Simpson_400x400.jpg"
title="My Post"
/>
<CardContent>
<Typography gutterBottom variant="h5">My first Post</Typography>
<Typography variant="body2">
Lorem Ipsum is simply dummy text of the printing and typesetting
industry. Lorem Ipsum has been the industry's standard dummy text
ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book. It has survived not
only five centuries, but also the leap into electronic typesetting,
remaining essentially unchanged.{" "}
</Typography>
</CardContent>
</CardActionArea>
<CardActions>
<Button size="small" color="primary">Share</Button>
<Button size="small" color="primary">Learn more</Button>
</CardActions>
</Card>
);
};
export default Post;
| 97112704fe8b15ff5152c113403240dc8393d1dc | [
"JavaScript"
] | 1 | JavaScript | hugues0/material-ui-newsfeed | d44fe576c3dd6f3a11575b84e6ae72fe7bed6a8f | 18bb32ff63570e20583227a8f25a332fa3dbaabe |
refs/heads/master | <file_sep>execute "append java home to hadoop setenv.sh" do
command %Q{echo "export JAVA_HOME=$JAVA_HOME" >> "#{node.hadoop.user_home}/hadoop-#{node.hadoop.version}/conf/hadoop-env.sh"}
user node.hadoop.user
end
execute "create directory through execute, coz chef is stupid with permission on recursive" do
command "mkdir -p #{node.hadoop.data_dir}"
user node.hadoop.user
group node.hadoop.user_group_name
not_if {::File.exist?("#{node.hadoop.data_dir}")}
end
template "#{node.hadoop.user_home}/hadoop-#{node.hadoop.version}/conf/core-site.xml" do
owner node.hadoop.user
group node.hadoop.user_group_name
mode 0664
variables :data_dir => node.hadoop.data_dir
end
template "#{node.hadoop.user_home}/hadoop-#{node.hadoop.version}/conf/mapred-site.xml" do
owner node.hadoop.user
group node.hadoop.user_group_name
mode 0664
end
template "#{node.hadoop.user_home}/hadoop-1.2.1/conf/hdfs-site.xml" do
owner node.hadoop.user
group node.hadoop.user_group_name
mode 0664
end
execute "transfer ownership to hduser" do
command "chown -R #{node.hadoop.user}:#{node.hadoop.user_group_name} #{node.hadoop.user_home}"
end
cookbook_file "VideoCount.java" do
path "#{node.hadoop.user_home}/VideoCount.java"
action :create_if_missing
owner node.hadoop.user
group node.hadoop.user_group_name
mode 00644
end
cookbook_file "VideoCountMap.java" do
path "#{node.hadoop.user_home}/VideoCountMap.java"
action :create_if_missing
owner node.hadoop.user
group node.hadoop.user_group_name
mode 00644
end
cookbook_file "VideoCountReduce.java" do
path "#{node.hadoop.user_home}/VideoCountReduce.java"
action :create_if_missing
owner node.hadoop.user
group node.hadoop.user_group_name
mode 00644
end
execute "format data folder" do
command "./hadoop-#{node.hadoop.version}/bin/hadoop namenode -format -nonInteractive"
user node.hadoop.user
cwd node.hadoop.user_home
end
directory "#{node.hadoop.user_home}/classes" do
owner node.hadoop.user
group node.hadoop.user_group_name
mode 00755
action :create
end<file_sep># -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "precise64"
config.vm.box_url = "http://files.vagrantup.com/precise64.box"
config.vm.network "forwarded_port", guest: 50030, host: 51130
config.vm.synced_folder "/Users/bezidejni/Documents/faks/diplomski/rznu/lab1/restapi/hadoop", "/hadoop"
config.vm.provision :shell, :inline => "sudo apt-get update"
config.vm.provision :chef_solo do |chef|
chef.cookbooks_path = "cookbooks"
chef.add_recipe 'java'
chef.add_recipe 'hadoop'
end
end
| 323dc73142e421ce137a996b2674ceeeaf0f5559 | [
"Ruby"
] | 2 | Ruby | bezidejni/single-node-hadoop | 9606e5a9aedbf76388fbf2e2ed22fe1a3f6c4234 | f258bc9507bf224b723eb340184be45753c80387 |
refs/heads/master | <repo_name>mateus-vicente/allpix-squared<file_sep>/tools/tcad_dfise_converter/dfise_converter.cpp
#include "dfise_converter.h"
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <climits>
#include <csignal>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <Eigen/Eigen>
#include "../../src/core/utils/log.h"
#include "Octree.hpp"
#include "read_dfise.h"
std::pair<Point, bool> barycentric_interpolation(Point query_point,
std::vector<Point> tetra_vertices,
std::vector<Point> tetra_vertices_field,
double tetra_volume) {
// Algorithm variables
bool volume_signal = true;
bool sub_1_signal, sub_2_signal, sub_3_signal, sub_4_signal;
Eigen::Matrix4d matrix_sub1, matrix_sub2, matrix_sub3, matrix_sub4;
double tetra_subvol_1, tetra_subvol_2, tetra_subvol_3, tetra_subvol_4;
// Return variable. Point(interpolated electric field x, y, z)
Point efield_int;
bool flag = true;
std::pair<Point, bool> efield_valid;
// Function must have tetra_vertices.size() = 4
if(tetra_vertices.size() != 4) {
throw std::invalid_argument("Baricentric interpolation without only 4 vertices!");
}
if(tetra_volume > 0) {
volume_signal = true;
}
if(tetra_volume < 0) {
volume_signal = false;
}
// Jacobi Matrix for volume calculation for each tetrahedron with a vertex replaced by the query point
matrix_sub1 << 1, 1, 1, 1, query_point.x, tetra_vertices[1].x, tetra_vertices[2].x, tetra_vertices[3].x, query_point.y,
tetra_vertices[1].y, tetra_vertices[2].y, tetra_vertices[3].y, query_point.z, tetra_vertices[1].z,
tetra_vertices[2].z, tetra_vertices[3].z;
tetra_subvol_1 = (matrix_sub1.determinant()) / 6;
if(tetra_subvol_1 > 0) {
sub_1_signal = true;
}
if(tetra_subvol_1 < 0) {
sub_1_signal = false;
}
if(tetra_subvol_1 == 0) {
sub_1_signal = volume_signal;
}
matrix_sub2 << 1, 1, 1, 1, tetra_vertices[0].x, query_point.x, tetra_vertices[2].x, tetra_vertices[3].x,
tetra_vertices[0].y, query_point.y, tetra_vertices[2].y, tetra_vertices[3].y, tetra_vertices[0].z, query_point.z,
tetra_vertices[2].z, tetra_vertices[3].z;
tetra_subvol_2 = (matrix_sub2.determinant()) / 6;
if(tetra_subvol_2 > 0) {
sub_2_signal = true;
}
if(tetra_subvol_2 < 0) {
sub_2_signal = false;
}
if(tetra_subvol_2 == 0) {
sub_2_signal = volume_signal;
}
matrix_sub3 << 1, 1, 1, 1, tetra_vertices[0].x, tetra_vertices[1].x, query_point.x, tetra_vertices[3].x,
tetra_vertices[0].y, tetra_vertices[1].y, query_point.y, tetra_vertices[3].y, tetra_vertices[0].z,
tetra_vertices[1].z, query_point.z, tetra_vertices[3].z;
tetra_subvol_3 = (matrix_sub3.determinant()) / 6;
if(tetra_subvol_3 > 0) {
sub_3_signal = true;
}
if(tetra_subvol_3 < 0) {
sub_3_signal = false;
}
if(tetra_subvol_3 == 0) {
sub_3_signal = volume_signal;
}
matrix_sub4 << 1, 1, 1, 1, tetra_vertices[0].x, tetra_vertices[1].x, tetra_vertices[2].x, query_point.x,
tetra_vertices[0].y, tetra_vertices[1].y, tetra_vertices[2].y, query_point.y, tetra_vertices[0].z,
tetra_vertices[1].z, tetra_vertices[2].z, query_point.z;
tetra_subvol_4 = (matrix_sub4.determinant()) / 6;
if(tetra_subvol_4 > 0) {
sub_4_signal = true;
}
if(tetra_subvol_4 < 0) {
sub_4_signal = false;
}
if(tetra_subvol_4 == 0) {
sub_4_signal = volume_signal;
}
// Electric field interpolation
efield_int.x = (tetra_subvol_1 * tetra_vertices_field[0].x + tetra_subvol_2 * tetra_vertices_field[1].x +
tetra_subvol_3 * tetra_vertices_field[2].x + tetra_subvol_4 * tetra_vertices_field[3].x) /
tetra_volume;
efield_int.y = (tetra_subvol_1 * tetra_vertices_field[0].y + tetra_subvol_2 * tetra_vertices_field[1].y +
tetra_subvol_3 * tetra_vertices_field[2].y + tetra_subvol_4 * tetra_vertices_field[3].y) /
tetra_volume;
efield_int.z = (tetra_subvol_1 * tetra_vertices_field[0].z + tetra_subvol_2 * tetra_vertices_field[1].z +
tetra_subvol_3 * tetra_vertices_field[2].z + tetra_subvol_4 * tetra_vertices_field[3].z) /
tetra_volume;
// Check if query point is outside tetrahedron
if(sub_1_signal != volume_signal || sub_2_signal != volume_signal || sub_3_signal != volume_signal ||
sub_4_signal != volume_signal) {
flag = false;
LOG(DEBUG) << "Warning: Point outside tetrahedron";
efield_valid = std::make_pair(efield_int, flag);
return efield_valid;
}
for(size_t i = 0; i < tetra_vertices.size(); i++) {
auto distance = unibn::L2Distance<Point>::compute(tetra_vertices[i], query_point);
LOG(DEBUG) << "Tetrahedron vertex (" << tetra_vertices[i].x << ", " << tetra_vertices[i].y << ", "
<< tetra_vertices[i].z << ") - "
<< " Distance: " << distance << " - Electric field: (" << tetra_vertices_field[i].x << ", "
<< tetra_vertices_field[i].y << ", " << tetra_vertices_field[i].z << ").";
}
LOG(DEBUG) << "Tetra full volume: " << tetra_volume << std::endl
<< "Tetra sub volume 1: " << tetra_subvol_1 << std::endl
<< "Tetra sub volume 2: " << tetra_subvol_2 << std::endl
<< "Tetra sub volume 3: " << tetra_subvol_3 << std::endl
<< "Tetra sub volume 4: " << tetra_subvol_4 << std::endl
<< "Volume difference: "
<< tetra_volume - (tetra_subvol_1 + tetra_subvol_2 + tetra_subvol_3 + tetra_subvol_4);
LOG(DEBUG) << "Interpolated electric field: (" << efield_int.x << "," << efield_int.y << "," << efield_int.z << ")";
efield_valid = std::make_pair(efield_int, flag);
return efield_valid;
}
void interrupt_handler(int) {
LOG(STATUS) << "Interrupted! Aborting conversion...";
allpix::Log::finish();
std::exit(0);
}
int main(int argc, char** argv) {
// If no arguments are provided, print the help:
bool print_help = false;
int return_code = 0;
if(argc == 1) {
print_help = true;
return_code = 1;
}
// Add stream and set default logging level
allpix::Log::addStream(std::cout);
allpix::Log::setReportingLevel(allpix::LogLevel::INFO);
// Install abort handler (CTRL+\) and interrupt handler (CTRL+C)
std::signal(SIGQUIT, interrupt_handler);
std::signal(SIGINT, interrupt_handler);
std::string file_prefix;
std::string init_file_prefix;
std::string log_file_name;
std::string region = "bulk"; // Sensor bulk region name on DF-ISE file
float volume_cut = std::numeric_limits<float>::min(); // Enclosing tetrahedron should have volume != 0
size_t index_cut = 10000000; // Permutation index initial cut
bool index_cut_flag = false;
float initial_radius = 1; // Neighbour vertex search radius
float radius_step = 0.5; // Search radius increment
float max_radius = 10; // Maximum search radius
int xdiv = 100; // New mesh X pitch
int ydiv = 100; // New mesh Y pitch
int zdiv = 100; // New mesh Z pitch
for(int i = 1; i < argc; i++) {
if(strcmp(argv[i], "-h") == 0) {
print_help = true;
} else if(strcmp(argv[i], "-v") == 0 && (i + 1 < argc)) {
try {
allpix::LogLevel log_level = allpix::Log::getLevelFromString(std::string(argv[++i]));
allpix::Log::setReportingLevel(log_level);
} catch(std::invalid_argument& e) {
LOG(ERROR) << "Invalid verbosity level \"" << std::string(argv[i]) << "\", ignoring overwrite";
return_code = 1;
}
} else if(strcmp(argv[i], "-f") == 0 && (i + 1 < argc)) {
file_prefix = std::string(argv[++i]);
} else if(strcmp(argv[i], "-o") == 0 && (i + 1 < argc)) {
init_file_prefix = std::string(argv[++i]);
} else if(strcmp(argv[i], "-R") == 0 && (i + 1 < argc)) {
region = std::string(argv[++i]);
} else if(strcmp(argv[i], "-r") == 0 && (i + 1 < argc)) {
initial_radius = static_cast<float>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-s") == 0 && (i + 1 < argc)) {
radius_step = static_cast<float>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-m") == 0 && (i + 1 < argc)) {
max_radius = static_cast<float>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-i") == 0 && (i + 1 < argc)) {
index_cut = static_cast<size_t>(strtod(argv[++i], nullptr));
index_cut_flag = true;
} else if(strcmp(argv[i], "-c") == 0 && (i + 1 < argc)) {
volume_cut = static_cast<float>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-x") == 0 && (i + 1 < argc)) {
xdiv = static_cast<int>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-y") == 0 && (i + 1 < argc)) {
ydiv = static_cast<int>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-z") == 0 && (i + 1 < argc)) {
zdiv = static_cast<int>(strtod(argv[++i], nullptr));
} else if(strcmp(argv[i], "-l") == 0 && (i + 1 < argc)) {
log_file_name = std::string(argv[++i]);
} else {
LOG(ERROR) << "Unrecognized command line argument or missing value \"" << argv[i] << "\"";
print_help = true;
return_code = 1;
}
}
if(file_prefix.empty()) {
print_help = true;
return_code = 1;
}
if(init_file_prefix.empty()) {
init_file_prefix = file_prefix;
auto sep_idx = init_file_prefix.find_last_of('/');
if(sep_idx != std::string::npos) {
init_file_prefix = init_file_prefix.substr(sep_idx + 1);
}
}
// Print help if requested or no arguments given
if(print_help) {
std::cerr << "Usage: ./tcad_dfise_reader -f <file_name> [<options>]" << std::endl;
std::cout << "\t -f <file_prefix> common prefix of DF-ISE grid (.grd) and data (.dat) files" << std::endl;
std::cout << "\t -o <init_file_prefix> output file prefix without .init (defaults to file name of <file_prefix>)"
<< std::endl;
std::cout << "\t -R <region> region name to be meshed (defaults to 'bulk')" << std::endl;
std::cout << "\t -r <radius> initial node neighbors search radius in um (defaults to 1 um)" << std::endl;
std::cout << "\t -s <radius_step> radius step if no neighbor is found (defaults to 0.5 um)" << std::endl;
std::cout << "\t -m <max_radius> maximum search radius (default is 10 um)" << std::endl;
std::cout << "\t -i <index_cut> index cut during permutation on vertex neighbours (disabled by default)"
<< std::endl;
std::cout << "\t -c <volume_cut> minimum volume for tetrahedron for non-coplanar vertices (defaults to "
"minimum float value)"
<< std::endl;
std::cout << "\t -x <mesh x_pitch> new regular mesh X pitch (defaults to 100)" << std::endl;
std::cout << "\t -y <mesh_y_pitch> new regular mesh Y pitch (defaults to 100)" << std::endl;
std::cout << "\t -z <mesh_z_pitch> new regular mesh Z pitch (defaults to 100)" << std::endl;
std::cout << "\t -l <file> file to log to besides standard output (disabled by default)" << std::endl;
std::cout << "\t -v <level> verbosity level (default reporiting level is INFO)" << std::endl;
allpix::Log::finish();
return return_code;
}
// NOTE: this stream should be available for the duration of the logging
std::ofstream log_file;
if(!log_file_name.empty()) {
log_file.open(log_file_name, std::ios_base::out | std::ios_base::trunc);
if(!log_file.good()) {
LOG(FATAL) << "Cannot write to provided log file! Check if permissions are sufficient.";
allpix::Log::finish();
return 1;
}
allpix::Log::addStream(log_file);
}
auto start = std::chrono::system_clock::now();
LOG(STATUS) << "Reading mesh grid from grid file";
std::string grid_file = file_prefix + ".grd";
std::vector<Point> points;
try {
auto region_grid = read_grid(grid_file);
points = region_grid[region];
} catch(std::runtime_error& e) {
LOG(FATAL) << "Failed to parse grid file " << grid_file;
LOG(FATAL) << " " << e.what();
allpix::Log::finish();
return 1;
}
LOG(STATUS) << "Reading electric field from data file";
std::string data_file = file_prefix + ".dat";
std::vector<Point> field;
try {
auto region_fields = read_electric_field(data_file);
field = region_fields[region];
} catch(std::runtime_error& e) {
LOG(FATAL) << "Failed to parse data file " << data_file;
LOG(FATAL) << " " << e.what();
allpix::Log::finish();
return 1;
}
if(points.size() != field.size()) {
LOG(FATAL) << "Field and grid file do not match";
allpix::Log::finish();
return 1;
}
/* ALERT fix coordinates */
for(unsigned int i = 0; i < points.size(); ++i) {
std::swap(points[i].y, points[i].z);
std::swap(field[i].y, field[i].z);
}
// Find minimum and maximum from mesh coordinates
double minx = DBL_MAX, miny = DBL_MAX, minz = DBL_MAX;
double maxx = -DBL_MAX, maxy = -DBL_MAX, maxz = -DBL_MAX; // NOTE DBL_MIN is the smallest positive double, not the most negative
for(auto& point : points) {
minx = std::min(minx, point.x);
miny = std::min(miny, point.y);
minz = std::min(minz, point.z);
maxx = std::max(maxx, point.x);
maxy = std::max(maxy, point.y);
maxz = std::max(maxz, point.z);
}
/*
* ALERT invert the z-axis to match the ap2 system
* WARNING this will remove the right-handedness of the coordinate system!
*/
for(size_t i = 0; i < points.size(); ++i) {
points[i].z = maxz - (points[i].z - minz);
field[i].z = -field[i].z;
}
auto end = std::chrono::system_clock::now();
auto elapsed_seconds = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
LOG(INFO) << "Reading the files took " << elapsed_seconds << " seconds.";
LOG(STATUS) << "Starting regular grid interpolation";
// Initializing the Octree with points from mesh cloud.
unibn::Octree<Point> octree;
octree.initialize(points);
// Creating a new mesh points cloud with a regular pitch
std::vector<Point> e_field_new_mesh;
double xstep = (maxx - minx) / static_cast<double>(xdiv);
double ystep = (maxy - miny) / static_cast<double>(ydiv);
double zstep = (maxz - minz) / static_cast<double>(zdiv);
double x = minx + xstep / 2.0;
for(int i = 0; i < xdiv; ++i) {
double y = miny + ystep / 2.0;
for(int j = 0; j < ydiv; ++j) {
double z = minz + zstep / 2.0;
for(int k = 0; k < zdiv; ++k) {
Point q(x, y, z); // New mesh vertex
Point e(x, y, z); // Corresponding, to be interpolated, electric field
bool flag = false;
std::pair<Point, bool> return_interpolation;
LOG_PROGRESS(INFO, "POINT") << "Interpolating point X=" << i + 1 << " Y=" << j + 1 << " Z=" << k + 1 << " ("
<< q.x << "," << q.y << "," << q.z << ")";
size_t prev_neighbours = 0;
float radius = initial_radius;
size_t index_cut_up;
while(radius < max_radius) {
LOG(TRACE) << "Search radius: " << radius;
// Calling octree neighbours search and sorting the results list with the closest neighbours first
std::vector<unsigned int> results;
octree.radiusNeighbors<unibn::L2Distance<Point>>(q, radius, results);
std::sort(results.begin(), results.end(), [&](unsigned int a, unsigned int b) {
return unibn::L2Distance<Point>::compute(points[a], q) <
unibn::L2Distance<Point>::compute(points[b], q);
});
LOG(TRACE) << "Number of vertices found: " << results.size();
if(results.empty()) {
LOG(WARNING) << "At vertex (" << x << ", " << y << ", " << z << ")" << std::endl
<< "Radius too Small. No neighbours found for radius " << radius << std::endl
<< "Increasing the readius (setting a higher initial radius may help)";
radius = radius + radius_step;
continue;
}
if(results.size() < 4) {
LOG(WARNING) << "At vertex (" << x << ", " << y << ", " << z << ")" << std::endl
<< "Incomplete mesh element found for radius " << radius << std::endl
<< "Increasing the readius (setting a higher initial radius may help)";
radius = radius + radius_step;
continue;
}
// If after a radius step no new neighbours are found, go to the next radius step
if(results.size() > prev_neighbours) {
prev_neighbours = results.size();
} else {
LOG(WARNING) << "At vertex (" << x << ", " << y << ", " << z << ")" << std::endl
<< "No new neighbour after radius step. Going to next step.";
radius = radius + radius_step;
continue;
}
// Finding tetrahedrons
double volume;
Eigen::Matrix4d matrix;
size_t num_nodes_element = 4;
std::vector<Point> tetra_vertices;
std::vector<Point> tetra_vertices_field;
std::vector<int> bitmask(num_nodes_element, 1);
bitmask.resize(results.size(), 0);
std::vector<size_t> index;
if(!index_cut_flag) {
index_cut = results.size();
}
index_cut_up = index_cut;
while(index_cut_up <= results.size()) {
do {
index.clear();
tetra_vertices.clear();
tetra_vertices_field.clear();
// print integers and permute bitmask
for(size_t idk = 0; idk < results.size(); ++idk) {
if(bitmask[idk] != 0) {
index.push_back(idk);
tetra_vertices.push_back(points[results[idk]]);
tetra_vertices_field.push_back(field[results[idk]]);
}
if(index.size() == 4) {
break;
}
}
if(index[0] > index_cut_up || index[1] > index_cut_up || index[2] > index_cut_up ||
index[3] > index_cut_up) {
continue;
}
LOG(TRACE) << "Parsing neighbors [index]: " << index[0] << ", " << index[1] << ", " << index[2]
<< ", " << index[3];
matrix << 1, 1, 1, 1, points[results[index[0]]].x, points[results[index[1]]].x,
points[results[index[2]]].x, points[results[index[3]]].x, points[results[index[0]]].y,
points[results[index[1]]].y, points[results[index[2]]].y, points[results[index[3]]].y,
points[results[index[0]]].z, points[results[index[1]]].z, points[results[index[2]]].z,
points[results[index[3]]].z;
volume = (matrix.determinant()) / 6;
if(std::abs(volume) <= volume_cut) {
LOG(DEBUG) << "Coplanar vertices. Going to the next vertex combination.";
continue;
}
try {
return_interpolation =
barycentric_interpolation(q, tetra_vertices, tetra_vertices_field, volume);
e = return_interpolation.first;
flag = return_interpolation.second;
} catch(std::invalid_argument& exception) {
LOG(DEBUG) << "Failed to interpolate point: " << exception.what();
continue;
}
if(flag == false) {
continue;
}
break;
} while(std::prev_permutation(bitmask.begin(), bitmask.end()));
if(tetra_vertices.size() == 4 && flag == true) {
break;
}
LOG(DEBUG) << "All combinations tried up to index " << index_cut_up
<< " done. Increasing the index cut.";
index_cut_up = index_cut_up + index_cut;
}
if(tetra_vertices.size() == 4 && flag == true) {
break;
}
LOG(DEBUG) << "All combinations tried. Increasing the radius.";
radius = radius + radius_step;
}
if(flag == false) {
LOG(FATAL) << "Couldn't interpolate new mesh point, probably the grid is too irregular";
return 1;
}
e_field_new_mesh.push_back(e);
z += zstep;
}
y += ystep;
}
x += xstep;
}
end = std::chrono::system_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
LOG(INFO) << "New mesh created in " << elapsed_seconds << " seconds.";
LOG(STATUS) << "Writing INIT file";
std::ofstream init_file;
std::stringstream init_file_name;
init_file_name << init_file_prefix << ".init";
init_file.open(init_file_name.str());
// Write INIT file header
init_file << "tcad_octree_writer" << std::endl; // NAME
init_file << "##SEED## ##EVENTS##" << std::endl; // UNUSED
init_file << "##TURN## ##TILT## 1.0" << std::endl; // UNUSED
init_file << "0.0 0.0 0.0" << std::endl; // MAGNETIC FIELD (UNUSED)
init_file << (maxz - minz) << " " << (maxx - minx) << " " << (maxy - miny) << " "; // PIXEL DIMENSIONS
init_file << "0.0 0.0 0.0 0.0 "; // UNUSED
init_file << xdiv << " " << ydiv << " " << zdiv << " "; // GRID SIZE
init_file << "0.0" << std::endl; // UNUSED
// Write INIT file data
for(int i = 0; i < xdiv; ++i) {
for(int j = 0; j < ydiv; ++j) {
for(int k = 0; k < zdiv; ++k) {
auto& point = e_field_new_mesh[static_cast<unsigned int>(i * ydiv * zdiv + j * zdiv + k)];
init_file << i + 1 << " " << j + 1 << " " << k + 1 << " " << point.x << " " << point.y << " " << point.z
<< std::endl;
}
}
}
init_file.close();
end = std::chrono::system_clock::now();
elapsed_seconds = std::chrono::duration_cast<std::chrono::seconds>(end - start).count();
LOG(STATUS) << "Conversion completed in " << elapsed_seconds << " seconds.";
allpix::Log::finish();
return 0;
}
<file_sep>/src/objects/MCParticle.cpp
/**
* @file
* @brief Implementation of Monte-Carlo particle object
* @copyright MIT License
*/
#include "MCParticle.hpp"
using namespace allpix;
MCParticle::MCParticle(ROOT::Math::XYZPoint local_entry_point,
ROOT::Math::XYZPoint global_entry_point,
ROOT::Math::XYZPoint local_exit_point,
ROOT::Math::XYZPoint global_exit_point,
int particle_id)
: local_entry_point_(std::move(local_entry_point)), global_entry_point_(std::move(global_entry_point)),
local_exit_point_(std::move(local_exit_point)), global_exit_point_(std::move(global_exit_point)),
particle_id_(particle_id) {}
ROOT::Math::XYZPoint MCParticle::getLocalEntryPoint() const {
return local_entry_point_;
}
ROOT::Math::XYZPoint MCParticle::getGlobalEntryPoint() const {
return global_entry_point_;
}
ROOT::Math::XYZPoint MCParticle::getLocalExitPoint() const {
return local_exit_point_;
}
ROOT::Math::XYZPoint MCParticle::getGlobalExitPoint() const {
return global_exit_point_;
}
int MCParticle::getParticleID() const {
return particle_id_;
}
ClassImp(MCParticle)
<file_sep>/src/objects/PixelHit.cpp
/**
* @file
* @brief Implementation of object with digitized pixel hit
* @copyright MIT License
*/
#include "PixelHit.hpp"
#include <set>
#include "DepositedCharge.hpp"
#include "PropagatedCharge.hpp"
#include "exceptions.h"
using namespace allpix;
PixelHit::PixelHit(Pixel pixel, double time, double signal, const PixelCharge* pixel_charge)
: pixel_(std::move(pixel)), time_(time), signal_(signal) {
pixel_charge_ = const_cast<PixelCharge*>(pixel_charge); // NOLINT
}
Pixel PixelHit::getPixel() const {
return pixel_;
}
/**
* @throws MissingReferenceException If the pointed object is not in scope
*
* Object is stored as TRef and can only be accessed if pointed object is in scope
*/
const PixelCharge* PixelHit::getPixelCharge() const {
auto pixel_charge = dynamic_cast<PixelCharge*>(pixel_charge_.GetObject());
if(pixel_charge == nullptr) {
throw MissingReferenceException(typeid(*this), typeid(PixelCharge));
}
return pixel_charge;
}
/**
* @throws MissingReferenceException If some object in the required history is not in scope
*
* MCParticles can only be fetched if the full history of objects is in scope and stored
*/
std::vector<const MCParticle*> PixelHit::getMCParticles() const {
auto pixel_charge = getPixelCharge();
// Find particles corresponding to every propagated charge
auto propagated_charges = pixel_charge->getPropagatedCharges();
std::set<const MCParticle*> unique_particles;
for(auto& propagated_charge : propagated_charges) {
auto deposited_charge = propagated_charge->getDepositedCharge();
auto mc_particle = deposited_charge->getMCParticle();
// NOTE if any deposited charge has no related mc particle this will fail
unique_particles.insert(mc_particle);
}
// Return as a vector of mc particles
return std::vector<const MCParticle*>(unique_particles.begin(), unique_particles.end());
}
ClassImp(PixelHit)
<file_sep>/doc/usermanual/chapters/additional.tex
\section{Additional Tools \& Resources}
\label{sec:additional_tools_resources}
\todo{I think all the AP2 tools belong here as well}
\subsection{Framework Tools}
\subsubsection{ROOT and Geant4 utilities}
\label{sec:root_and_geant4_utilities}
The framework provides a set of methods to ease the integration of ROOT and Geant4 in the framework. An important part is the extension of the custom conversion \texttt{to\_string} and \texttt{from\_string} methods from the internal string utilities (see Section \ref{sec:string_utilities}) to support internal ROOT and Geant4 classes. This allows configuration parameters to be read directly into those types, making the code in the modules both shorter and cleaner. Besides this, some other conversion functions are provided together with other useful utilities (for example to display a ROOT vector with units attached).
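As a minimal sketch of what this enables (the parameter name below is illustrative only, not taken from an actual module):
\begin{verbatim}
// Read a vector-valued parameter such as "position = 0 0 -1mm"
// directly into a ROOT type via the extended conversions
auto position = config.get<ROOT::Math::XYZPoint>("position");

// Display it back with units attached using the provided utilities
LOG(DEBUG) << "Position: " << display_vector(position, {"mm", "um"});
\end{verbatim}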
\subsubsection{Runge-Kutta integrator}
A fast Eigen-powered~\cite{eigen3} Runge-Kutta integrator is provided as a tool to solve differential equations. The integrator is built generically and supports multiple methods using different tableaus. It allows integrating any system of equations in several steps with a customizable time step. The time step can also be updated during the integration depending on the error of the Runge-Kutta method (if a tableau with error estimation is used).
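As a minimal illustration of the underlying idea, a single classical fourth-order Runge-Kutta step implemented with Eigen is sketched below; this is a generic example, not the tableau-driven interface actually shipped with the framework:
\begin{verbatim}
#include <Eigen/Dense>
#include <functional>

// One classical RK4 step for dy/dt = f(t, y) with step size h
Eigen::Vector3d rk4_step(
    const std::function<Eigen::Vector3d(double, const Eigen::Vector3d&)>& f,
    double t, const Eigen::Vector3d& y, double h) {
    Eigen::Vector3d k1 = f(t, y);
    Eigen::Vector3d k2 = f(t + h / 2, y + h / 2 * k1);
    Eigen::Vector3d k3 = f(t + h / 2, y + h / 2 * k2);
    Eigen::Vector3d k4 = f(t + h, y + h * k3);
    return y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4);
}
\end{verbatim}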
\inputmd{tools/tcad_dfise_converter.tex}
% FIXME This label is not required to bind correctly
\label{sec:tcad_electric_field_converter}
\inputmd{tools/root_analysis_macros.tex}
% FIXME This label is not required to bind correctly
\label{sec:root_analysis_macros}
<file_sep>/src/modules/ElectricFieldReader/README.md
## ElectricFieldReader
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
#### Description
Adds an electric field to the detector from the standard supported sources. By default every detector has no electric field in the sensitive device.
The reader supports three types of electric field:
* For *constant* electric fields it adds a constant electric field in the z-direction towards the pixel implants.
* For *linear* electric fields the field has a constant slope determined by the bias_voltage, the depletion_voltage and the depletion depth.
* For electric fields in the *INIT* format it parses a file in the INIT format used in the PixelAV software. An example of an electric field in this format can be found in *etc/example_electric_field.init* in the repository. An explanation of the format is available in the source code of this module, and a sketch of the layout is given below.
Furthermore, the module can produce a plot of the electric field profile along a projection axis normal to the x, y or z-axis at a particular plane in the sensor.
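A rough sketch of the INIT layout, inferred from the TCAD converter tool shipped with this framework (all concrete values below are placeholders): a few header lines are followed by one line per grid node, carrying the node indices and the electric field vector.
```
tcad_octree_writer                         <- name line
##SEED## ##EVENTS##                        <- unused
##TURN## ##TILT## 1.0                      <- unused
0.0 0.0 0.0                                <- magnetic field (unused)
<size z> <size x> <size y> 0.0 0.0 0.0 0.0 <xdiv> <ydiv> <zdiv> 0.0
1 1 1 <Ex> <Ey> <Ez>
1 1 2 <Ex> <Ey> <Ez>
...
```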
#### Parameters
* `model` : Type of the electric field model, either **linear**, **constant** or **init**.
* `bias_voltage` : Voltage over the whole sensor thickness. Used to calculate the electric field if the *model* parameter is equal to **constant** or **linear**.
* `file_name` : Location of file containing the electric field in the INIT format. Only used if the *model* parameter has the value **init**.
* `output_plots` : Determines if output plots should be generated (slows down simulation). Disabled by default.
* `output_plots_steps` : Number of bins in both the X and Y direction in the 2D histogram used to plot the electric field in the detectors. Only used if `output_plots` is enabled.
* `output_plots_project` : Axis to project the 3D electric field on to create the 2D histogram. Either **x**, **y** or **z**. Only used if `output_plots` is enabled.
* `output_plots_projection_percentage` : Percentage on the projection axis to plot the electric field profile. For example if *output_plots_project* is **x** and this parameter is 0.5 the profile is plotted in the Y,Z-plane at the X-coordinate in the middle of the sensor.
* `output_plots_single_pixel`: Determines if the whole sensor has to be plotted or only a single pixel. Defaults to true (plotting a single pixel).
#### Usage
An example that adds a linear field with a 50 V bias to all detectors, except the detector named 'dut', which receives a specific INIT field, is given below
```ini
[ElectricFieldReader]
model = "linear"
bias_voltage = 50V
[ElectricFieldReader]
name = "dut"
model = "init"
# Should point to the example electric field in the repositories etc directory
file_name = "example_electric_field.init"
```
<file_sep>/src/modules/DepositionGeant4/README.md
## DepositionGeant4
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
**Output**: DepositedCharge, MCParticle
#### Description
Module that creates the charge deposits in the sensitive devices; a wrapper around the Geant4 logic. Depends on a geometry construction in the GeometryBuilderGeant4 module. Initializes the physical processes to simulate and creates a particle source that generates particles in every event. For all particles passing the detectors in the geometry, the energy loss is converted into charge deposits for all steps (of customizable size) in the sensor. The information about the truth particle passage is also made available for later modules.
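As a rough worked example (using commonly quoted textbook numbers, not values taken from this module): a minimum-ionizing particle deposits on the order of 80 keV when traversing 300 um of silicon, so with the default charge creation energy of 3.64 eV this corresponds to roughly 80 keV / 3.64 eV ≈ 22 000 electron-hole pairs, distributed over the individual steps along the track.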
#### Parameters
* `physics_list`: Internal Geant4 list of physical processes to simulate. More information about possible physics list and recommendations for default is available [here](http://geant4.cern.ch/support/proc_mod_catalog/physics_lists/referencePL.shtml).
* `charge_creation_energy` : Energy needed to create a charge deposit. Defaults to the energy needed to create an electron-hole pair in silicon (3.64 eV).
* `max_step_length` : Maximum length of a simulation step in every sensitive device.
* `particle_position` : Position of the particle source in the world geometry.
* `particle_type` : Type of the Geant4 particle to use in the source. Refer to [this](http://geant4.cern.ch/G4UsersDocuments/UsersGuides/ForApplicationDeveloper/html/TrackingAndPhysics/particle.html) page for information about the available types of particles.
* `particle_radius_sigma` : Standard deviation of the radius from the particle source.
* `particle_direction` : Direction of the particle as a unit vector.
* `particle_energy` : Energy of the generated particle.
* `number_of_particles` : Number of particles to generate in a single event. Defaults to one particle.
#### Usage
A solid default configuration to use, simulating a test beam of 120 GeV pions, is the following:
```ini
[DepositionGeant4]
physics_list = QGSP_BERT
particle_type = "pi+"
particle_energy = 120GeV
particle_position = 0 0 -1mm
particle_direction = 0 0 1
number_of_particles = 1
```
<file_sep>/src/modules/DepositionGeant4/SensitiveDetectorActionG4.cpp
/**
* @file
* @brief Implements the handling of the sensitive device
* @remarks Based on code from <NAME>
* @copyright MIT License
*/
#include "SensitiveDetectorActionG4.hpp"
#include <memory>
#include "G4DecayTable.hh"
#include "G4HCofThisEvent.hh"
#include "G4LogicalVolume.hh"
#include "G4RunManager.hh"
#include "G4SDManager.hh"
#include "G4Step.hh"
#include "G4ThreeVector.hh"
#include "G4Track.hh"
#include "G4VProcess.hh"
#include "G4ios.hh"
#include "TMath.h"
#include "TString.h"
#include "core/utils/log.h"
#include "tools/ROOT.h"
#include "tools/geant4.h"
using namespace allpix;
SensitiveDetectorActionG4::SensitiveDetectorActionG4(Module* module,
const std::shared_ptr<Detector>& detector,
Messenger* msg,
double charge_creation_energy)
: G4VSensitiveDetector("SensitiveDetector_" + detector->getName()), module_(module), detector_(detector),
messenger_(msg), charge_creation_energy_(charge_creation_energy) {
// Add the sensor to the internal sensitive detector manager
G4SDManager* sd_man_g4 = G4SDManager::GetSDMpointer();
sd_man_g4->AddNewDetector(this);
}
G4bool SensitiveDetectorActionG4::ProcessHits(G4Step* step, G4TouchableHistory*) {
// Get the step parameters
auto edep = step->GetTotalEnergyDeposit();
G4StepPoint* preStepPoint = step->GetPreStepPoint();
G4StepPoint* postStepPoint = step->GetPostStepPoint();
// Put the charge deposit in the middle of the step
G4ThreeVector mid_pos = (preStepPoint->GetPosition() + postStepPoint->GetPosition()) / 2;
double mid_time = (preStepPoint->GetGlobalTime() + postStepPoint->GetGlobalTime()) / 2;
// Calculate the charge deposit at a local position
auto deposit_position = detector_->getLocalPosition(static_cast<ROOT::Math::XYZPoint>(mid_pos));
auto charge = static_cast<unsigned int>(edep / charge_creation_energy_);
// Save entry point for all first steps in volume
if(step->IsFirstStepInVolume()) {
track_parents_[step->GetTrack()->GetTrackID()] = step->GetTrack()->GetParentID();
// Search for the entry at the start of the sensor
auto track_id = step->GetTrack()->GetTrackID();
auto entry_position = detector_->getLocalPosition(static_cast<ROOT::Math::XYZPoint>(preStepPoint->GetPosition()));
while(track_parents_[track_id] != 0 &&
std::fabs(entry_position.z() - (detector_->getModel()->getSensorCenter().z() -
detector_->getModel()->getSensorSize().z() / 2.0)) > 1e-9) {
track_id = track_parents_[track_id];
entry_position = entry_points_[track_id];
}
entry_points_[step->GetTrack()->GetTrackID()] = entry_position;
}
// Add MCParticle for the last step in the volume if it is at the edge of the sensor
// FIXME Current method does not make sense if the incoming particle is not the same as the outgoing particle
if(step->IsLastStepInVolume() &&
std::fabs(detector_->getLocalPosition(static_cast<ROOT::Math::XYZPoint>(postStepPoint->GetPosition())).z() -
(detector_->getModel()->getSensorCenter().z() + detector_->getModel()->getSensorSize().z() / 2.0)) < 1e-9) {
// Add new MC particle track
auto local_entry = entry_points_[step->GetTrack()->GetTrackID()];
auto global_entry = detector_->getGlobalPosition(local_entry);
auto global_exit = static_cast<ROOT::Math::XYZPoint>(postStepPoint->GetPosition());
auto local_exit = detector_->getLocalPosition(global_exit);
mc_particles_.emplace_back(
local_entry, global_entry, local_exit, global_exit, step->GetTrack()->GetDynamicParticle()->GetPDGcode());
id_to_particle_[step->GetTrack()->GetTrackID()] = static_cast<unsigned int>(mc_particles_.size() - 1);
}
// Add new deposit if the charge is more than zero
if(charge == 0) {
return false;
}
auto global_deposit_position = detector_->getGlobalPosition(deposit_position);
deposits_.emplace_back(deposit_position, global_deposit_position, CarrierType::ELECTRON, charge, mid_time);
deposits_.emplace_back(deposit_position, global_deposit_position, CarrierType::HOLE, charge, mid_time);
// FIXME how do we correlate them?
deposit_ids_.emplace_back(step->GetTrack()->GetTrackID());
deposit_ids_.emplace_back(step->GetTrack()->GetTrackID());
LOG(DEBUG) << "Created deposit of " << charge << " charges at " << display_vector(mid_pos, {"mm", "um"})
<< " locally on " << display_vector(deposit_position, {"mm", "um"}) << " in " << detector_->getName()
<< " after " << Units::display(mid_time, {"ns", "ps"});
return true;
}
unsigned int SensitiveDetectorActionG4::getTotalDepositedCharge() {
return total_deposited_charge_;
}
void SensitiveDetectorActionG4::dispatchDepositedChargeMessage() {
// Always send the track information
auto mc_particle_message = std::make_shared<MCParticleMessage>(std::move(mc_particles_), detector_);
messenger_->dispatchMessage(module_, mc_particle_message);
// Create new mc particle vector
mc_particles_ = std::vector<MCParticle>();
// Send a new message if we have any deposits
if(!deposits_.empty()) {
IFLOG(INFO) {
unsigned int charges = 0;
for(auto& ch : deposits_) {
charges += ch.getCharge();
total_deposited_charge_ += ch.getCharge();
}
LOG(INFO) << "Deposited " << charges << " charges in sensor of detector " << detector_->getName();
}
// Match deposit with mc particle if possible
for(size_t i = 0; i < deposits_.size(); ++i) {
auto iter = id_to_particle_.find(deposit_ids_.at(i));
if(iter != id_to_particle_.end()) {
deposits_.at(i).setMCParticle(&mc_particle_message->getData().at(iter->second));
}
}
// Create a new charge deposit message
auto deposit_message = std::make_shared<DepositedChargeMessage>(std::move(deposits_), detector_);
// Dispatch the message
messenger_->dispatchMessage(module_, deposit_message);
// Make a new empty vector of deposits
deposits_ = std::vector<DepositedCharge>();
deposit_ids_.clear();
}
id_to_particle_.clear();
// Clear track parents and entry point list
track_parents_.clear();
entry_points_.clear();
}
<file_sep>/src/modules/RCEWriter/RCEWriterModule.cpp
/**
* @file
* @brief Implementation of RCE Writer Module
* @copyright MIT License
*/
#include "RCEWriterModule.hpp"
#include <string>
#include <utility>
#include <TBranchElement.h>
#include <TClass.h>
#include <TDirectory.h>
#include "core/utils/log.h"
#include "core/utils/type.h"
#include "objects/Object.hpp"
#include "objects/objects.h"
using namespace allpix;
RCEWriterModule::RCEWriterModule(Configuration config, Messenger* messenger, GeometryManager* geo_mgr)
: Module(config), config_(std::move(config)), geo_mgr_(geo_mgr) {
// Bind to PixelHitMessage
messenger->bindMulti(this, &RCEWriterModule::pixel_hit_messages_);
}
RCEWriterModule::~RCEWriterModule() = default;
void RCEWriterModule::init() {
// Create output file
std::string file_name = getOutputPath(config_.get<std::string>("file_name", "rce_data") + ".root", true);
output_file_ = std::make_unique<TFile>(file_name.c_str(), "RECREATE");
output_file_->cd();
// Initialize the events tree
event_tree_ = std::make_unique<TTree>("Event", "");
event_tree_->Branch("TimeStamp", ×tamp_);
event_tree_->Branch("FrameNumber", &frame_number_);
event_tree_->Branch("TriggerOffset", &trigger_offset_);
event_tree_->Branch("TriggerInfo", &trigger_info_);
event_tree_->Branch("TriggerTime", &trigger_time_);
event_tree_->Branch("Invalid", &invalid_);
// Get the detector names
for(const auto& detector : geo_mgr_->getDetectors()) {
detector_names_.push_back(detector->getName());
}
// Sort the detector names
std::sort(detector_names_.begin(), detector_names_.end());
// For each detector name, initialize an instance of SensorData
int det_index = 0;
for(const auto& detector_name : detector_names_) {
auto& sensor = sensors_[detector_name];
// Create directories for each detector
det_dir_name = "Plane" + std::to_string(det_index);
TDirectory* detector = output_file_->mkdir(det_dir_name.c_str());
detector->cd();
det_index += 1;
// Initialize the struct for each detector
sensor.tree = std::make_unique<TTree>("Hits", "");
LOG(TRACE) << "Detector name is: " << detector_name;
// Initialize tree branches for each instance of the SensorData
sensor.tree->Branch("NHits", &sensor.nhits_);
sensor.tree->Branch("PixX", &sensor.pix_x_, "PixX[NHits]/I");
sensor.tree->Branch("PixY", &sensor.pix_y_, "PixY[NHits]/I");
sensor.tree->Branch("Value", &sensor.value_, "Value[NHits]/I");
sensor.tree->Branch("Timing", &sensor.timing_, "Timing[NHits]/I");
sensor.tree->Branch("HitInCluster", &sensor.hit_in_cluster_, "HitInCluster[NHits]/I");
}
}
void RCEWriterModule::run(unsigned int event_id) {
// fill per-event data
timestamp_ = 0;
frame_number_ = event_id;
trigger_offset_ = 0;
trigger_info_ = 0;
trigger_time_ = 0;
invalid_ = false;
LOG(TRACE) << "Writing new objects to the Events tree";
// Fill the events tree
event_tree_->Fill();
// Loop over all the detectors
for(const auto& detector_name : detector_names_) {
// reset nhits
auto& sensor = sensors_[detector_name];
sensor.nhits_ = 0;
}
// Loop over the pixel hit messages
for(const auto& hit_msg : pixel_hit_messages_) {
std::string detector_name = hit_msg->getDetector()->getName();
auto& sensor = sensors_[detector_name];
// Loop over all the hits
for(const auto& hit : hit_msg->getData()) {
int i = sensor.nhits_;
// Fill the tree with received messages
sensor.nhits_ += 1;
sensor.pix_x_[i] = static_cast<Int_t>(hit.getPixel().getIndex().x()); // NOLINT
sensor.pix_y_[i] = static_cast<Int_t>(hit.getPixel().getIndex().y()); // NOLINT
sensor.value_[i] = static_cast<Int_t>(hit.getSignal()); // NOLINT
// Set the Timing and HitInCluster for each sensor tree (= 0 for now)
sensor.timing_[i] = 0; // NOLINT
sensor.hit_in_cluster_[i] = 0; // NOLINT
LOG(TRACE) << "Detector Name: " << detector_name << ", X: " << hit.getPixel().getIndex().x()
<< ", Y:" << hit.getPixel().getIndex().y() << ", Signal: " << hit.getSignal();
}
}
// Loop over all the detectors to fill all corresponding sensor trees
for(const auto& detector_name : detector_names_) {
LOG(TRACE) << "Writing new objects to the Sensor Tree for " << detector_name;
sensors_[detector_name].tree->Fill();
}
}
void RCEWriterModule::finalize() {
LOG(TRACE) << "Writing objects to file";
// Finish writing to the output file
output_file_->Write();
}
<file_sep>/src/modules/GenericPropagation/README.md
## GenericPropagation
**Maintainer**: <NAME> (<<EMAIL>>), <NAME> (<<EMAIL>>)
**Status**: Functional
**Input**: DepositedCharge
**Output**: PropagatedCharge
#### Description
Simulates the generic propagation of electrons (ignoring the corresponding holes) through the sensitive devices of every detector. Splits the set of deposited charges into multiple smaller sets (each containing multiple charges) that are propagated together. The propagation process is fully independent: the individual sets of propagated charges do not influence each other. The maximum size of the propagated sets and the accuracy of the propagation can be controlled.
The propagation consists of a combination of drift and diffusion simulation. The drift is calculated using the charge carrier velocity derived from the mobility parameterization by C. Jacoboni et al. in [A review of some charge transport properties of silicon](https://doi.org/10.1016/0038-1101(77)90054-5). The correct mobility for either electrons or holes is chosen automatically, based on the type of the charge carrier under consideration; thus, input with both electrons and holes is also treated properly.
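For illustration, a minimal sketch of a Jacoboni/Canali-type mobility parameterization is shown below. The constants are commonly quoted room-temperature electron values and are assumptions of this sketch, not numbers extracted from the module:
```cpp
#include <cmath>

// Jacoboni/Canali-type mobility: mu(E) = mu0 / (1 + (mu0*E/v_sat)^beta)^(1/beta)
// Constants are typical 300 K electron values (illustrative only)
double electron_mobility(double efield /* V/cm */) {
    const double mu0 = 1400.0;   // low-field mobility [cm^2/(V s)]
    const double v_sat = 1.07e7; // saturation velocity [cm/s]
    const double beta = 1.109;   // shape parameter at 300 K
    return mu0 / std::pow(1.0 + std::pow(mu0 * efield / v_sat, beta), 1.0 / beta);
}
```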
The two parameters `propagate_electrons` and `propagate_holes` allow to control, which type of charge carrier is propagated to the electrodes. Either one of the carrier types can be selected, or both can be propagated. It should be noted that this will slow down the simulation considerably since twice as many carriers have to be handled and it should only be used where sensible.
A fourth-order Runge-Kutta-Fehlberg method with fifth-order error estimation is used to integrate the particle motion in the electric field. After every Runge-Kutta step a random walk is simulated by applying Gaussian diffusion calculated from the carrier mobility, the temperature and the time step. The propagation stops when the set of charges reaches the border of the sensor.
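The diffusion part can be sketched with the Einstein relation `D = mu * kT/q`, which gives a per-coordinate Gaussian width of `sigma = sqrt(2 * D * dt)` for a time step `dt` (a simplified illustration assuming room temperature, not the module's exact implementation):
```cpp
#include <cmath>
#include <random>

// Gaussian diffusion displacement for one time step along one axis.
// mobility in cm^2/(V s), timestep in s; kT/q at ~300 K is assumed.
double diffusion_step(double mobility, double timestep, std::mt19937& rng) {
    const double kT_over_q = 0.02585;              // thermal voltage [V]
    const double diffusion = kT_over_q * mobility; // Einstein relation [cm^2/s]
    std::normal_distribution<double> gauss(0.0, std::sqrt(2.0 * diffusion * timestep));
    return gauss(rng);                             // displacement [cm]
}
```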
The propagation module also produces a variety of output plots for debugging and publication purposes. The plots include a 3D line plot of the paths of all separate propagated charges from their deposits, with nearby paths having different colors. It can also output a 3D GIF animation of all the individual sets of charges (with the size of the point proportional to the number of charges in the set). Finally, it produces 2D contour animations in all the planes normal to the X, Y and Z axes, showing the concentration flow in the sensor.
#### Parameters
* `temperature` : Temperature in the sensitive device, used to estimate the diffusion constant and therefore the strength of the diffusion.
* `charge_per_step` : Maximum number of charges to propagate together. Divides the total deposited charge at a specific point in sets of this number of charges and a set with the remaining amount of charges. A value of 10 charges per step is used if this value is not specified.
* `spatial_precision` : Spatial precision to aim for. The timestep of the Runge-Kutta propagation is adjusted to reach this spatial precision after calculating the error from the fifth-order error method. Defaults to 0.1nm.
* `timestep_start` : Timestep to initialize the Runge-Kutta integration with. Better initialization of this parameter reduces the time to optimize the timestep to the *spatial_precision* parameter. Default value is 0.01ns.
* `timestep_min` : Minimum step in time to use for the Runge-Kutta integration regardless of the spatial precision. Defaults to 0.5ps.
* `timestep_max` : Maximum step in time to use for the Runge-Kutta integration regardless of the spatial precision. Defaults to 0.1ns.
* `integration_time` : Time within which charge carriers are propagated. After exceeding this time, no further propagation is performed for the respective carriers. Defaults to the LHC bunch crossing time of 25ns.
* `propagate_electrons` : Select whether electron-type charge carriers should be propagated to the electrodes. Defaults to true.
* `propagate_holes` : Select whether hole-type charge carriers should be propagated to the electrodes. Defaults to false.
* `output_plots` : Determines if output plots should be generated for every event. This slows the simulation down considerably; it is not recommended for runs of more than a single event. Disabled by default.
* `output_animation` : In addition to the other output plots, also write a GIF animation of the charges drifting towards the electrodes. This is very slow and writing the animation takes a considerable amount of time.
* `output_plots_step` : Timestep to use between two points that are plotted. Indirectly determines the amount of points plotted. Defaults to *timestep_max* if not explicitly specified.
* `output_plots_theta` : Viewpoint angle of the 3D animation and the 3D line graph around the world Z-axis. Defaults to zero.
* `output_plots_phi` : Viewpoint angle of the 3D animation and the 3D line graph around the world X-axis. Defaults to zero.
* `output_plots_use_pixel_units` : Determines if the plots should use pixels as unit instead of metric length scales. Defaults to false (thus using the metric system).
* `output_plots_use_equal_scaling` : Determines if the plots should be produced with equal distance scales on every axis (also if this implies that some points will fall out of the graph). Defaults to true.
* `output_plots_animation_time_scaling` : Scaling for the animation to use to convert the actual simulation time to the time step in the animation. Defaults to 1.0e9, meaning that every nanosecond is equal to an animation step of a single second.
* `output_plots_contour_max_scaling` : Scaling to use for the contour color axis from the theoretical maximum charge at every single plot step. Default is 10, meaning that the maximum of the color scale axis is equal to the total amount of charges divided by ten (values above this are displayed in the same maximum color). Parameter can be used to improve the color scale of the contour plots.
#### Usage
An example of generic propagation for all Timepix sensors at room temperature, using packets of 25 charges, is the following:
```
[GenericPropagation]
type = "timepix"
temperature = 293K
charge_per_step = 25
```
<file_sep>/src/modules/DepositionGeant4/SensitiveDetectorActionG4.hpp
/**
* @file
* @brief Defines the handling of the sensitive device
* @copyright MIT License
*/
#ifndef ALLPIX_SIMPLE_DEPOSITION_MODULE_SENSITIVE_DETECTOR_ACTION_H
#define ALLPIX_SIMPLE_DEPOSITION_MODULE_SENSITIVE_DETECTOR_ACTION_H
#include <memory>
#include <G4VSensitiveDetector.hh>
#include <G4WrapperProcess.hh>
#include "core/geometry/Detector.hpp"
#include "core/messenger/Messenger.hpp"
#include "objects/DepositedCharge.hpp"
#include "objects/MCParticle.hpp"
namespace allpix {
/**
* @brief Handles the steps of the particles in all sensitive devices
*/
class SensitiveDetectorActionG4 : public G4VSensitiveDetector {
public:
/**
* @brief Constructs the action handling for every sensitive detector
* @param module Pointer to the DepositionGeant4 module holding this class
* @param detector Detector this sensitive device is bound to
* @param msg Pointer to the messenger to send the charge deposits
* @param charge_creation_energy Energy needed per deposited charge
*/
SensitiveDetectorActionG4(Module* module,
const std::shared_ptr<Detector>& detector,
Messenger* msg,
double charge_creation_energy);
/**
* @brief Get total of charges deposited in the sensitive device bound to this action
*/
unsigned int getTotalDepositedCharge();
/**
* @brief Process a single step of a particle passage through this sensor
* @param step Information about the step
* @param history Parameter not used
*/
G4bool ProcessHits(G4Step* step, G4TouchableHistory* history) override;
/**
* @brief Send the DepositedCharge Message
*/
void dispatchDepositedChargeMessage();
private:
// Pointer to the deposition module holding this class
Module* module_;
std::shared_ptr<Detector> detector_;
Messenger* messenger_;
double charge_creation_energy_;
// Statistics of total deposited charge
unsigned int total_deposited_charge_{};
// Set of deposited charges in this event
std::vector<DepositedCharge> deposits_;
// List of ids for every deposit
std::vector<int> deposit_ids_;
// List of entry points for all tracks
std::map<int, ROOT::Math::XYZPoint> entry_points_;
// Parent of all tracks
std::map<int, int> track_parents_;
// List of all MC particles
std::vector<MCParticle> mc_particles_;
// Conversions from id to particle index
std::map<int, unsigned int> id_to_particle_;
};
} // namespace allpix
#endif /* ALLPIX_SIMPLE_DEPOSITION_MODULE_SENSITIVE_DETECTOR_ACTION_H */
<file_sep>/README.md
[![](doc/logo_small.png)](https://cern.ch/allpix-squared)
# Allpix<sup>2</sup>
Generic simulation framework for pixel detectors based on [AllPix](https://github.com/AllPix/allpix).
For more details about the project please have a look at the website at https://cern.ch/allpix-squared.
[![build status](https://gitlab.cern.ch/simonspa/allpix-squared/badges/master/build.svg)](https://gitlab.cern.ch/simonspa/allpix-squared/commits/master)
[![coverity status](https://scan.coverity.com/projects/11975/badge.svg)](https://scan.coverity.com/projects/koensw-allpix-squared)
## Dependencies
* [ROOT](https://root.cern.ch/building-root) (required, with the GenVector component)
* [Geant4](http://geant4.web.cern.ch/geant4/UserDocumentation/UsersGuides/InstallationGuide/html/ch02.html) (optional, but required for typical purposes)
* [Eigen3](http://eigen.tuxfamily.org/index.php?title=Main_Page) (optional, but required for typical purposes)
## Installation
The CMake build system is used for compilation and installation. The install directory can be specified by adding `-DCMAKE_INSTALL_PREFIX=<prefix>` as argument to the CMake command below. Other configuration options are explained in the manual (see the documentation section below).
The dependencies need to be initialized for the build to succeed. Currently there are two methods to load these:
### Prerequisites on CERN LXPLUS
In order to install Allpix<sup>2</sup> on the CERN LXPLUS batch cluster, a LXPLUS setup script is provided:
```
$ source etc/scripts/setup_lxplus.sh
```
Then, continue as described under the compilation section.
### Prerequisites on a private machine
The dependencies listed above have to be satisfied. Both ROOT6 and Geant4 libraries and headers have to be in the path; this is usually achieved by sourcing the `thisroot.sh` and `geant4.sh` scripts. After this, continue as described below.
### Compilation
To compile and install a default installation of Allpix<sup>2</sup>, run the following commands
```
$ mkdir build && cd build/
$ cmake ..
$ make install
```
For more detailed installation instructions, please refer to the documentation below.
## Documentation
A recent version of the User's Manual is available online [here](https://project-allpix-squared.web.cern.ch/project-allpix-squared/usermanual/allpix-manual.pdf). Some parts of the manual are still missing, but the vast majority of the sections are fairly complete. The Doxygen reference can also be viewed [online](https://project-allpix-squared.web.cern.ch/project-allpix-squared/reference/html/).
The latest PDF version of the User's Manual can also be created from source by executing
```
$ make pdf
```
After running this command, the manual is available under `usermanual/allpix-manual.pdf` in the build directory.
To build the HTML version of the latest Doxygen reference, run the following command
```
$ make reference
```
The main page of the reference can then be found at `reference/html/index.html` in the build folder.
## Contributing
All type of minor and major contributions are very welcome. Please refer to our [contributing guidelines](CONTRIBUTING.md) for a description on how to get started.
Before adding changes it is very much recommended to carefully read through the documentation first.
<file_sep>/src/core/config/ConfigManager.cpp
/**
* @file
* @brief Implementation of config manager
* @copyright MIT License
*/
#include "ConfigManager.hpp"
#include <fstream>
#include <string>
#include <vector>
#include "Configuration.hpp"
#include "core/utils/file.h"
#include "core/utils/log.h"
#include "exceptions.h"
using namespace allpix;
/**
* @throws ConfigFileUnavailableError If the main configuration file cannot be accessed
*/
ConfigManager::ConfigManager(std::string file_name) : file_name_(std::move(file_name)) {
LOG(TRACE) << "Using " << file_name_ << " as main configuration file";
// Check if the file exists
std::ifstream file(file_name_);
if(!file) {
throw ConfigFileUnavailableError(file_name_);
}
// Convert main file to absolute path
file_name_ = allpix::get_absolute_path(file_name_);
// Read the file
reader_.add(file, file_name_);
}
/**
* @warning Only one header can be added in this way to define its name
*/
void ConfigManager::setGlobalHeaderName(std::string name) {
global_names_.emplace(name);
global_default_name_ = std::move(name);
}
void ConfigManager::addGlobalHeaderName(std::string name) {
global_names_.emplace(std::move(name));
}
/**
* The global configuration is the combination of all sections with a global header.
*/
Configuration ConfigManager::getGlobalConfiguration() {
Configuration global_config(global_default_name_, file_name_);
for(auto& global_name : global_names_) {
auto configs = reader_.getConfigurations(global_name);
for(auto& config : configs) {
global_config.merge(config);
}
}
return global_config;
}
void ConfigManager::addIgnoreHeaderName(std::string name) {
ignore_names_.emplace(std::move(name));
}
bool ConfigManager::hasConfiguration(const std::string& name) {
return reader_.hasConfiguration(name);
}
/**
* All special global and ignored sections are removed before returning the rest of the configurations. The list of normal
* sections is used by the ModuleManager to instantiate all the required modules.
*/
std::vector<Configuration> ConfigManager::getConfigurations() const {
std::vector<Configuration> result;
for(auto& config : reader_.getConfigurations()) {
// Ignore all global and ignored section names
if(global_names_.find(config.getName()) != global_names_.end() ||
ignore_names_.find(config.getName()) != ignore_names_.end()) {
continue;
}
result.push_back(config);
}
return result;
}
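// Typical usage (illustrative sketch; the file and section names are only
// examples and not mandated by this class):
//
//   ConfigManager manager("main.conf");
//   manager.setGlobalHeaderName("Allpix");
//   Configuration global = manager.getGlobalConfiguration();
//
// All sections with a registered global header name are merged into the single
// configuration returned by getGlobalConfiguration().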
<file_sep>/src/core/geometry/MonolithicPixelDetectorModel.hpp
/**
* @file
* @brief Parameters of a monolithic pixel detector model
*
* @copyright MIT License
*/
#ifndef ALLPIX_MONOLITHIC_PIXEL_DETECTOR_H
#define ALLPIX_MONOLITHIC_PIXEL_DETECTOR_H
#include <string>
#include <utility>
#include <Math/Cartesian2D.h>
#include <Math/DisplacementVector2D.h>
#include <Math/Point3D.h>
#include <Math/Vector2D.h>
#include <Math/Vector3D.h>
#include "DetectorModel.hpp"
namespace allpix {
/**
* @ingroup DetectorModels
* @brief Model of a monolithic pixel detector. This is a model where the sensor is directly connected to the chip.
*
* This model is basically already fully implemented in the \ref DetectorModel base class.
*/
class MonolithicPixelDetectorModel : public DetectorModel {
public:
/**
* @brief Constructs the monolithic pixel detector model
* @param type Name of the model type
* @param reader Configuration reader with description of the model
*/
explicit MonolithicPixelDetectorModel(std::string type, const ConfigReader& reader)
: DetectorModel(std::move(type), reader) {}
};
} // namespace allpix
#endif /* ALLPIX_MONOLITHIC_PIXEL_DETECTOR_H */
<file_sep>/src/modules/GeometryBuilderTGeo/GeometryBuilderTGeoModule.cpp
/// \file GeometryBuilderTGeoModule.cpp
/// \brief Implementation of the GeometryBuilderTGeoModule class
/// \author <NAME>
/*
To be discussed :
- Shall the algo stop if a geometry is already loaded ?
- Do we want a CheckOverlaps option ? Or do we use ROOT's tools offline on the TFile.
Stop in case of overlap ?
- GeometryBuilderTGeoModule also responsible for loading the geometry ?
Colors :
kOrange+1 : experimental hall
kRed : wrapper
kCyan : Wafer, pixels
kGreen : support, bumps container volumes
kYellow : Bump logical volume
kGray : Chip, GuardRings (+2)
kBlack : Appliances
*/
// Local includes
#include "GeometryBuilderTGeoModule.hpp"
// Global includes
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
// ROOT
#include <Math/EulerAngles.h>
#include <Math/Vector3D.h>
#include <TColor.h>
#include <TFile.h>
#include <TGeoBBox.h>
#include <TGeoCompositeShape.h>
#include <TGeoSphere.h>
#include <TGeoTube.h>
#include <TMath.h>
#include <TROOT.h>
// AllPix includes
#include "core/config/ConfigReader.hpp"
#include "core/config/exceptions.h"
#include "core/geometry/GeometryManager.hpp"
#include "core/utils/log.h"
// Tools
#include "tools/ROOT.h"
using namespace std;
using namespace allpix;
using namespace ROOT::Math;
using namespace TMath;
/**
* @brief Create a TGeoTranslation from a ROOT::Math::XYZVector
*/
TGeoTranslation ToTGeoTranslation(const XYZPoint& pos) {
return TGeoTranslation(pos.x(), pos.y(), pos.z());
}
/**
* @brief Print out a TGeoTranslation as done in allpix for easy comparison.
*/
TString Print(TGeoTranslation* trl) {
const Double_t* par = trl->GetTranslation();
return Form("(%f,%f,%f)", par[0], par[1], par[2]);
}
GeometryBuilderTGeoModule::GeometryBuilderTGeoModule(Configuration config, Messenger*, GeometryManager* geo_manager)
: Module(config), m_config(std::move(config)), m_geoDscMng(geo_manager), m_fillingWorldMaterial(nullptr),
m_userDefinedWorldMaterial("Air"), m_userDefinedGeoOutputFile(""), m_buildAppliancesFlag(false), m_Appliances_type(0),
m_buildTestStructureFlag(false) {
// Read the configuration
m_userDefinedWorldMaterial = m_config.get<std::string>("world_material");
m_buildAppliancesFlag = m_config.get<bool>("build_appliances", false);
if(m_buildAppliancesFlag) {
m_Appliances_type = m_config.get<int>("appliances_type");
}
m_buildTestStructureFlag = m_config.get<bool>("build_test_structures", false);
}
void GeometryBuilderTGeoModule::init() {
/* Instantiate the TGeo geometry manager.
It will remain persistent until gGeoManager is deleted.
*/
gGeoManager = new TGeoManager("AllPix2", "Detector geometry");
/* Set Verbosity according to the framework. Verbose only in debug mode.
ROOT : 0=mute, 1=verbose
LogLevel { QUIET = 0, CRITICAL, ERROR, WARNING, INFO, DEBUG }; */
gGeoManager->SetVerboseLevel(int(Log::getReportingLevel()) < 5 ? 0 : 1);
// Build detectors.
Construct();
// Close the Geometry
gGeoManager->CloseGeometry();
//### Visualisation Development only
// gGeoManager->SetTopVisible(); // the TOP is invisible by default
gGeoManager->SetVisLevel(4);
// gGeoManager->SetVisOption(0); // To see the intermediate containers.
// gGeoManager->GetVolume("name");
// TGeoVolume* top = gGeoManager->GetTopVolume();
// top->Draw();
// gGeoManager->CheckOverlaps(0.1);
// Save geometry in ROOT file.
if(m_config.has("output_file")) {
m_userDefinedGeoOutputFile = getOutputPath(m_config.get<string>("output_file"));
if(!m_userDefinedGeoOutputFile.EndsWith(".root")) {
m_userDefinedGeoOutputFile += ".root";
}
gGeoManager->Export(m_userDefinedGeoOutputFile); // ("file.root","","update") ??
LOG(DEBUG) << "Geometry saved in " << m_userDefinedGeoOutputFile;
}
// Export geometry as GDML if required
if(m_config.has("GDML_output_file")) {
#ifndef ROOT_GDML
std::string error = "You requested to export the geometry in GDML. ";
error += "However, GDML support is currently disabled in ROOT. ";
error += "To enable it, configure and compile ROOT with the option -Dgdml=ON.";
throw allpix::InvalidValueError(m_config, "GDML_output_file", error);
#else
TString GDML_output_file = getOutputPath(m_config.get<string>("GDML_output_file"));
if(!GDML_output_file.EndsWith(".gdml")) {
GDML_output_file += ".gdml";
}
gGeoManager->Export(GDML_output_file);
#endif
}
}
void GeometryBuilderTGeoModule::Construct() {
// Solids will be built in mm, the same units as AllPix1, even though ROOT assumes cm.
// Beware when computing shape capacity or volume weight.
LOG(TRACE) << "Starting construction of the detector geometry.";
// Create the materials and media.
BuildMaterialsAndMedia();
/* Creating the world volume, ie experimental hall
The size of the world does not seem to have any effect. Even if smaller than
the built detectors, ROOT does not complain.
*/
const XYZVector halfworld = m_config.get("world_size", XYZVector(1000, 1000, 1000)) / 2.0;
m_fillingWorldMaterial = gGeoManager->GetMedium(m_userDefinedWorldMaterial);
// If null, throw an exception and stop the construction !
if(m_fillingWorldMaterial == nullptr) {
throw ModuleError("Material " + std::string(m_userDefinedWorldMaterial) +
" requested to fill the world volume does not exist");
} else {
LOG(TRACE) << "Using " << m_userDefinedWorldMaterial << " to fill the world volume.";
}
// World volume, ie the experimental hall.
TGeoVolume* expHall_log =
gGeoManager->MakeBox("ExpHall", m_fillingWorldMaterial, halfworld.x(), halfworld.y(), halfworld.z());
// expHall_log->SetTransparency(100);
// G4Color(1.0, 0.65, 0.0, 0.1)->kOrange+1, SetVisibility(false), SetForceSolid(false)
expHall_log->SetLineColor(kOrange + 1);
gGeoManager->SetTopVolume(expHall_log);
// Build the pixel detectors
BuildPixelDevices();
// Build appliances
if(m_buildAppliancesFlag) {
BuildAppliances();
}
// Build test structures
if(m_buildTestStructureFlag) {
BuildTestStructure();
}
LOG(TRACE) << "Construction of the detector geometry successful.";
}
void GeometryBuilderTGeoModule::BuildPixelDevices() {
LOG(TRACE) << "Starting construction of the pixel detectors.";
vector<shared_ptr<Detector>> detectors = m_geoDscMng->getDetectors();
LOG(TRACE) << "Building " << detectors.size() << " device(s) ...";
// Big loop on pixel detectors.
auto detItr = detectors.begin();
for(; detItr != detectors.end(); detItr++) {
shared_ptr<HybridPixelDetectorModel> dsc = dynamic_pointer_cast<HybridPixelDetectorModel>((*detItr)->getModel());
string detname = (*detItr)->getName();
// TString id_s = Form("_%i", id);
TString id_s = "_";
id_s += detname;
LOG(DEBUG) << "Start building detector " << detname;
///////////////////////////////////////////////////////////
// wrapper
// The wrapper might be enhanced when the user sets up
// appliances for the detector (extra layers, etc.).
double wrapperHX = dsc->getSize().x() / 2.0;
double wrapperHY = dsc->getSize().y() / 2.0;
double wrapperHZ = dsc->getSize().z() / 2.0;
// Apply the enhancement to the medipixes (to contain possible appliances)
// We can have N medipixes and K enhancements, where K<=N.
// For instance, for 2 medipixes we can have:
// medipix 1 --> with enhancement
// medipix 2 --> no enhancement
TGeoTranslation wrapperEnhancementTransl = TGeoTranslation("WrapperEnhancementTransl", 0., 0., 0.);
if(m_vectorWrapperEnhancement.find(detname) != m_vectorWrapperEnhancement.end()) {
wrapperHX += m_vectorWrapperEnhancement[detname].x() / 2.; // half
wrapperHY += m_vectorWrapperEnhancement[detname].y() / 2.;
wrapperHZ += m_vectorWrapperEnhancement[detname].z() / 2.;
wrapperEnhancementTransl.SetDx(m_vectorWrapperEnhancement[detname].x() / 2.);
wrapperEnhancementTransl.SetDy(m_vectorWrapperEnhancement[detname].y() / 2.);
wrapperEnhancementTransl.SetDz(m_vectorWrapperEnhancement[detname].z() / 2.);
}
// The wrapper logical volume
TGeoVolume* wrapper_log =
gGeoManager->MakeBox(WrapperName + id_s, m_fillingWorldMaterial, 2. * wrapperHX, 2. * wrapperHY, 2. * wrapperHZ);
// G4Color(1,0,0,0.9)->kRed, SetLineWidth(1), SetForceSolid(false), SetVisibility(false)
wrapper_log->SetLineColor(kRed);
// Placement ! Retrieve position given by the user.
TGeoTranslation posWrapper = ToTGeoTranslation((*detItr)->getPosition());
// Apply wrapper enhancement
posWrapper.Add(&wrapperEnhancementTransl);
// Retrieve orientation given by the user.
EulerAngles angles = (*detItr)->getOrientation();
const double phi = angles.Phi() * RadToDeg();
const double theta = angles.Theta() * RadToDeg();
const double psi = angles.Psi() * RadToDeg();
TGeoRotation orWrapper = TGeoRotation("DetPlacement" + id_s, phi, theta, psi);
// And create a transformation.
auto* det_tr = new TGeoCombiTrans(posWrapper, orWrapper);
det_tr->SetName("DetPlacement" + id_s);
// Print out ! The wrapper will just be called "detector".
LOG(DEBUG) << " Detector placement relative to the World : ";
LOG(DEBUG) << " - Position : " << Print(&posWrapper);
LOG(DEBUG) << " - Orientation : " << TString::Format("%3.1f %3.1f %3.1f", phi, theta, psi);
LOG(DEBUG) << " - Wrapper Dimensions : " << TString::Format("%3.3f %3.3f %3.3f", wrapperHX, wrapperHY, wrapperHZ);
TGeoVolume* expHall_log = gGeoManager->GetTopVolume();
expHall_log->AddNode(wrapper_log, 1, det_tr);
///////////////////////////////////////////////////////////
// Device
// The Si wafer is placed with respect to the wrapper.
// Needs to be pushed -half Si wafer in z direction
TGeoBBox* Wafer_box = new TGeoBBox(WaferName + id_s,
dsc->getSensorSize().x() / 2.0,
dsc->getSensorSize().y() / 2.0,
dsc->getSensorSize().z() / 2.0);
TGeoMedium* Si_med = gGeoManager->GetMedium("Si"); // Retrieve Silicon
TGeoVolume* Wafer_log = new TGeoVolume(WaferName + id_s, Wafer_box, Si_med);
// G4Color(0,1,1,1)->kCyan, SetLineWidth(2), SetForceSolid(true);
Wafer_log->SetLineColor(kCyan);
Wafer_log->SetLineWidth(2);
// Wafer_log->SetVisibility(true);
///////////////////////////////////////////////////////////
// slices and pixels
// Replication along X axis, creation of a family.
// Option "N" tells to divide the whole axis range into NPixelsX.
// Start and step arguments are dummy.
TGeoVolume* Slice_log = Wafer_log->Divide(SliceName + id_s, 1, dsc->getNPixels().x(), 0, 1, 0, "N");
// Slice_log->SetVisibility(false);
// Replication along Y axis
TGeoVolume* Pixel_log = Slice_log->Divide(PixelName + id_s, 2, dsc->getNPixels().y(), 0, 1, 0, "N");
Pixel_log->SetLineColor(kCyan);
// Pixel_log->SetVisibility(false);
/*
The path to the corresponding nodes will be
Wafer_id_1\Slice_id_[1,NPixelsX]\Pixel_id_[1,NPixelsY]
*/
// Placement of the Device (Wafer), containing the pixels
TGeoTranslation* posDevice = new TGeoTranslation("LocalDevTranslation" + id_s, 0., 0., 0.);
// Apply position Offset for the detector due to the enhancement
posDevice->Add(&wrapperEnhancementTransl);
wrapper_log->AddNode(Wafer_log, 1, posDevice);
LOG(DEBUG) << " Relative positions of the elements to the detector :";
LOG(DEBUG) << " - Sensor position : " << Print(posDevice);
///////////////////////////////////////////////////////////
// Bumps
// Bump = Bump_Sphere + Bump_Tube
// Naming AllPix Allpix2
// Bump_Box -> None
// m_Bumps_log -> Bumps_log
// m_Bumps_phys -> None
// aBump -> Bump
// aBump_Sphere -> Bump_Sphere
// aBump_Tube -> Bump_Tube
// m_Bumps_Cell_log -> Bumps
double bump_height = dsc->getBumpHeight();
if(bump_height != 0. && dsc->getChipSize().z() / 2.0 != 0.) {
// Build the basic shapes
TString BumpSphereName = BumpName + "Sphere" + id_s;
new TGeoSphere(BumpSphereName,
0, // internal radius
dsc->getBumpSphereRadius() // ext radius
);
TString BumpTubeName = BumpName + "Tube" + id_s;
new TGeoTube(BumpTubeName,
0., // internal radius
// external radius
dsc->getBumpSphereRadius() - dsc->getBumpCylinderRadius(),
bump_height / 2.);
// Bump = Bump_Sphere + Bump_Tube
TGeoCompositeShape* Bump =
new TGeoCompositeShape(BumpName + "Shape" + id_s, BumpSphereName + "+" + BumpTubeName);
// The volume containing the bumps
TGeoVolume* Bumps_log = gGeoManager->MakeBox(BumpName + "Log" + id_s,
m_fillingWorldMaterial,
dsc->getSensorSize().x() / 2.0,
dsc->getSensorSize().y() / 2.0,
bump_height / 2.);
// G4Color(0,1,0,1.0)=kGreen, SetLineWidth(1), SetForceSolid(false),
// SetVisibility(true)
Bumps_log->SetLineColor(kGreen);
// Placement of the volume containing the bumps
TGeoTranslation* posBumps = new TGeoTranslation(
"LocalBumpsTranslation" + id_s, 0., 0., -dsc->getSensorSize().z() / 2.0 - (bump_height / 2));
posBumps->Add(posDevice);
LOG(DEBUG) << " - Bumps position : " << Print(posBumps);
wrapper_log->AddNode(Bumps_log, 1, posBumps);
// A bump logical volume
TGeoMedium* solder_med = gGeoManager->GetMedium("Solder");
TGeoVolume* Bumps = new TGeoVolume(BumpName + id_s, Bump, solder_med);
// G4Color::Yellow(), SetLineWidth(2), SetForceSolid(true)
Bumps->SetLineColor(kYellow);
Bumps->SetLineWidth(2);
// Replication and positioning of the bumps.
// Loop on x axis
for(int ix = 0; ix < dsc->getNPixels().x(); ++ix) {
// Loop on y axis
for(int iy = 0; iy < dsc->getNPixels().y(); ++iy) {
// Positions
double XPos = (ix * 2 + 1) * dsc->getPixelSize().x() / 2.0 - dsc->getSensorSize().x() / 2.0 +
(dsc->getBumpsCenter().x() - dsc->getCenter().x());
double YPos = (iy * 2 + 1) * dsc->getPixelSize().y() / 2.0 - dsc->getSensorSize().y() / 2.0 +
(dsc->getBumpsCenter().y() - dsc->getCenter().y());
TString xy_s = Form("_%i_%i", ix, iy);
TGeoTranslation* posBump = new TGeoTranslation("LocalBumpTranslation" + id_s + xy_s, XPos, YPos, 0.);
// Placement !
Bumps_log->AddNode(Bumps, ix + 1 + (iy * dsc->getNPixels().x()), posBump);
} // end loop y axis
} // end loop x axis
} // end if bumps
///////////////////////////////////////////////////////////
// Chip
// The Si wafer is placed with respect to the wrapper.
// Needs to be pushed -half Si wafer in z direction
if(dsc->getChipSize().z() / 2.0 != 0) {
TGeoVolume* Chip_log = gGeoManager->MakeBox(ChipName + id_s,
Si_med,
dsc->getChipSize().x() / 2.0,
dsc->getChipSize().y() / 2.0,
dsc->getChipSize().z() / 2.0);
// G4Color::Gray(), SetLineWidth(2), SetForceSolid(true), SetVisibility(true)
Chip_log->SetLineColor(kGray);
Chip_log->SetLineWidth(2);
// Placement !
TGeoTranslation* posChip =
new TGeoTranslation("LocalChipTranslation" + id_s,
dsc->getChipCenter().x() - dsc->getCenter().x(),
dsc->getChipCenter().y() - dsc->getCenter().y(),
-dsc->getSensorSize().z() / 2.0 - bump_height - dsc->getChipSize().z() / 2.0);
posChip->Add(posDevice);
LOG(DEBUG) << " - Chip position : " << Print(posChip);
wrapper_log->AddNode(Chip_log, 1, posChip);
}
///////////////////////////////////////////////////////////
// support
// The support is placed with respect to the wrapper.
// Needs to be pushed -half Si wafer in z direction
for(auto& layer : dsc->getSupportLayers()) {
// ALERT holes are not supported
// Retrieve Plexiglass
TGeoMedium* plexiglass_med = gGeoManager->GetMedium("Plexiglass");
// Create logical volume
TGeoVolume* support_log = gGeoManager->MakeBox(supportName + id_s,
plexiglass_med,
layer.getSize().x() / 2.0,
layer.getSize().y() / 2.0,
layer.getSize().z() / 2.0);
// G4Color::Green(), SetLineWidth(1), SetForceSolid(true)
support_log->SetLineColor(kGreen);
// Placement !
TGeoTranslation* possupport = new TGeoTranslation("LocalsupportTranslation" + id_s,
layer.getCenter().x() - dsc->getCenter().x(),
layer.getCenter().y() - dsc->getCenter().y(),
layer.getCenter().z() - dsc->getCenter().z());
possupport->Add(posDevice);
LOG(DEBUG) << " - Support position : " << Print(possupport);
wrapper_log->AddNode(support_log, 1, possupport);
}
///////////////////////////////////////////////////////////
// GuardRings and excess area
// Guard rings will be GuardRingsExt - Box
TString GuardRingsExtName = GuardRingsName + "Ext" + id_s;
new TGeoBBox(GuardRingsExtName,
dsc->getSensorSize().x() / 2.0,
dsc->getSensorSize().y() / 2.0,
// same depth as the sensor
dsc->getSensorSize().z() / 2.0);
TGeoCompositeShape* Solid_GuardRings = new TGeoCompositeShape(GuardRingsName + id_s,
// GuardRings = GuardRings_Ext - Wafer
GuardRingsExtName + "-" + Wafer_box->GetName());
// Create logical volume
TGeoVolume* GuardRings_log = new TGeoVolume(GuardRingsName + id_s, Solid_GuardRings, Si_med);
// G4Color(0.5,0.5,0.5,1)=kGray+2, SetLineWidth(1), SetForceSolid(true)
GuardRings_log->SetLineColor(kGray + 2);
// Placement ! Same as device
wrapper_log->AddNode(GuardRings_log, 1, posDevice);
LOG(TRACE) << "Building detector " << detname << " ... done.";
} // Big loop on detector descriptions
LOG(TRACE) << "Construction of the pixel detectors successful.";
}
void GeometryBuilderTGeoModule::BuildAppliances() {
// Through the command
// --> /allpix/extras/setAppliancePosition
// you can fill the vector "m_posVectorAppliances" available in this scope.
// This vector holds the positions of the appliances volumes which can be placed with
// respect to the wrapper. This way your appliance properly rotates
// with the detector.
// Through the command
// --> /allpix/extras/setWrapperEnhancement
// you can enhance the size of the wrapper so that daughter volumes of the wrapper
// fit in.
LOG(TRACE) << "Starting construction of the appliances " << m_Appliances_type;
// Check that appliance type is valid.
if(m_Appliances_type < 0 || m_Appliances_type > 1) {
LOG(ERROR) << "Unknown Appliance Type : " << m_Appliances_type
<< "Available types are 0,1. Set /allpix/extras/setApplianceType accordingly."
<< "Quitting...";
return;
}
// Check that we have some position vectors for the appliances.
if(m_posVectorAppliances.empty()) {
LOG(ERROR) << "You requested to build appliances, but no translation vector given."
<< "Please, set /allpix/extras/setAppliancePosition accordingly."
<< "Abandonning...";
return;
}
// Retrieve medium, ie aluminium.
TGeoMedium* Al = gGeoManager->GetMedium("Al");
// Build shapes and translations according to the requested type.
TString comp; // The composition of shapes.
TGeoTranslation* ApplTransl = nullptr; // Type-depending Translation vector.
if(m_Appliances_type == 0) {
new TGeoBBox("AppBoxSup", 87. / 2, 79. / 2, 5);
new TGeoBBox("AppBoxSupN", 72. / 2, 54. / 2, 8.);
new TGeoBBox("AppBoxSupN2", 52. / 2, 54. / 2, 5.);
auto* BoxSupN2Transl = new TGeoTranslation("AppBoxSupN2Translation", 0., 44.5, 4.);
BoxSupN2Transl->RegisterYourself();
comp = "(AppBoxSup-AppBoxSupN)-AppBoxSupN2:AppBoxSupN2Translation";
// Type depending translation vectors, with respect to the wrapper.
ApplTransl = new TGeoTranslation("ApplianceTransl", 0., 10.25, 0.);
} else if(m_Appliances_type == 1) {
// Empty Aluminium box with a window.
// Create the composite shape. mm !
new TGeoBBox("AppBoxOut", 54. / 2, 94.25 / 2, 12. / 2);
new TGeoBBox("AppBoxIn", 52.5 / 2, 92.5 / 2, 12. / 2);
new TGeoBBox("AppWindow", 10., 10., 1.5);
auto BoxInTransl = new TGeoTranslation("AppBoxInTranslation", 0., 0., -1.5);
BoxInTransl->RegisterYourself();
auto* WindowTransl = new TGeoTranslation("AppWindowTranslation", 0., -22.25, 6.);
WindowTransl->RegisterYourself();
comp = "(AppBoxOut-AppBoxIn:AppBoxInTranslation)-AppWindow:AppWindowTranslation";
// Type depending translation vectors, with respect to the wrapper.
ApplTransl = new TGeoTranslation("ApplianceTransl", 0., 0., 11.2);
}
auto* Support = new TGeoCompositeShape("SupportBox", comp);
// Create logical volume
auto* Support_log = new TGeoVolume("Appliance", Support, Al);
// G4Color(0,0,0,0.6)=kBlack,SetLineWidth(2),SetForceSolid(true),SetVisibility(true)
Support_log->SetLineWidth(2);
Support_log->SetLineColor(kBlack);
// Loop on the given position vectors and position the volumes.
auto aplItr = m_posVectorAppliances.begin();
int id = 0;
for(; aplItr != m_posVectorAppliances.end(); aplItr++) {
string detname = (*aplItr).first;
TString id_s = "_" + detname;
// Translation vectors, with respect to the wrapper.
// equals type-depending translation plus user given translation.
TGeoTranslation* ApplTranslItr = new TGeoTranslation("ApplianceTransl" + id_s, 0., 0., 0.);
ApplTranslItr->Add(&m_posVectorAppliances[detname]);
ApplTranslItr->Add(ApplTransl);
// Creation of the node.
// The mother volume is the wrapper. It will rotate with the wrapper.
TGeoVolume* Wrapper_log = gGeoManager->GetVolume(WrapperName + id_s);
Wrapper_log->AddNode(Support_log, ++id, ApplTranslItr);
} // end loop positions
LOG(TRACE) << "Construction of the appliances successful.";
}
void GeometryBuilderTGeoModule::BuildTestStructure() {}
void GeometryBuilderTGeoModule::BuildMaterialsAndMedia() {
/* Create the materials and mediums
Important note :
Only simple elements and materials are defined and used, enough for the
geometry description.
It is the user's responsibility to map those elements during the
simulation phase to elements with the proper physical properties.
Example : "Air" to "G4_Air", which could not be reproduced here.
*/
int numed = 0; // user medium index
// Vacuum
// G4Material("Vacuum", z=1 , a=1.01*g/mole, density= 0.0001*g/cm3);
int z = 1;
double a = 1.01; // g/mole
double density = 0.0001; // g/cm3
auto* vacuum_mat = new TGeoMaterial("Vacuum", a, z, density);
new TGeoMedium("Vacuum", 1, vacuum_mat);
// Air
/* AllPix1 uses "G4_AIR"
Material: G4_AIR density: 1.205 mg/cm3 RadL: 303.921 m Nucl.Int.Length: 710.095 m
Imean: 85.700 eV temperature: 293.15 K pressure: 1.00 atm
---> Element: C (C) Z = 6.0 N = 12 A = 12.011 g/mole
---> Isotope: C12 Z = 6 N = 12 A = 12.00 g/mole abundance: 98.930 %
---> Isotope: C13 Z = 6 N = 13 A = 13.00 g/mole abundance: 1.070 %
ElmMassFraction: 0.01 % ElmAbundance 0.02 %
---> Element: N (N) Z = 7.0 N = 14 A = 14.007 g/mole
---> Isotope: N14 Z = 7 N = 14 A = 14.00 g/mole abundance: 99.632 %
---> Isotope: N15 Z = 7 N = 15 A = 15.00 g/mole abundance: 0.368 %
ElmMassFraction: 75.53 % ElmAbundance 78.44 %
---> Element: O (O) Z = 8.0 N = 16 A = 15.999 g/mole
---> Isotope: O16 Z = 8 N = 16 A = 15.99 g/mole abundance: 99.757 %
---> Isotope: O17 Z = 8 N = 17 A = 17.00 g/mole abundance: 0.038 %
---> Isotope: O18 Z = 8 N = 18 A = 18.00 g/mole abundance: 0.205 %
ElmMassFraction: 23.18 % ElmAbundance 21.07 %
---> Element: Ar (Ar) Z = 18.0 N = 40 A = 39.948 g/mole
---> Isotope: Ar36 Z = 18 N = 36 A = 35.97 g/mole abundance: 0.337 %
---> Isotope: Ar38 Z = 18 N = 38 A = 37.96 g/mole abundance: 0.063 %
---> Isotope: Ar40 Z = 18 N = 40 A = 39.96 g/mole abundance: 99.600 %
ElmMassFraction: 1.28 % ElmAbundance 0.47 %
*/
auto* N = new TGeoElement("Nitrogen", "N", z = 7, a = 14.007);
auto* O = new TGeoElement("Oxygen", "O", z = 8, a = 15.999);
auto* C = new TGeoElement("Carbon", "C", z = 6, a = 12.011);
auto* Ar = new TGeoElement("Argon", "Ar", z = 18, a = 39.948);
auto* air_mat = new TGeoMixture("Air", 4, density = 1.205E-3);
air_mat->AddElement(N, 0.7844);
air_mat->AddElement(O, 0.2107);
air_mat->AddElement(C, 0.0002);
air_mat->AddElement(Ar, 0.0047);
new TGeoMedium("Air", ++numed, air_mat);
/* Silicon
AllPix1 uses "G4_Si"
*/
TGeoElementTable* table = gGeoManager->GetElementTable();
TGeoElement* Si = table->FindElement("Si");
auto* Si_mat = new TGeoMaterial("Si", Si, density = 2.330);
new TGeoMedium("Si", ++numed, Si_mat);
/* Epoxy
AllPix1 uses G4_PLEXIGLASS
*/
TGeoElement* H = table->FindElement("H");
auto* plexiglass_mat = new TGeoMixture("Plexiglass", 3, density = 1.19);
plexiglass_mat->AddElement(C, 5);
plexiglass_mat->AddElement(H, 8);
plexiglass_mat->AddElement(O, 2);
new TGeoMedium("Plexiglass", ++numed, plexiglass_mat);
/* Solder SnPb */
auto* Sn = new TGeoElement("Tin", "Sn", z = 50, a = 118.710);
auto* Pb = new TGeoElement("Lead", "Pb", z = 82., a = 207.2);
auto* solder_mat = new TGeoMixture("Solder", 2, density = 8.4);
solder_mat->AddElement(Sn, 63);
solder_mat->AddElement(Pb, 37);
new TGeoMedium("Solder", ++numed, solder_mat);
/* Aluminum
AllPix1 uses G4_Al
*/
TGeoElement* Al = table->FindElement("Al");
auto* Al_mat = new TGeoMaterial("Al", Al, density = 2.699);
new TGeoMedium("Al", ++numed, Al_mat);
}
<file_sep>/src/objects/exceptions.h
/**
* @file
* @brief Collection of all object exceptions
*
* @copyright MIT License
*/
#ifndef ALLPIX_OBJECT_EXCEPTIONS_H
#define ALLPIX_OBJECT_EXCEPTIONS_H
#include <string>
#include "core/utils/exceptions.h"
#include "core/utils/type.h"
namespace allpix {
/**
* @ingroup Exceptions
* @brief Indicates that an object does not contain the reference that was fetched
*/
class MissingReferenceException : public RuntimeError {
public:
/**
* @brief Constructs an error for an object with a missing reference
* @param source Type of the object from which the reference is missing
* @param reference Type of the object that is referenced
*/
explicit MissingReferenceException(const std::type_info& source, const std::type_info& reference) {
error_message_ = "Object ";
error_message_ += allpix::demangle(source.name());
error_message_ += " is missing reference to ";
error_message_ += allpix::demangle(reference.name());
}
};
} // namespace allpix
#endif /* ALLPIX_OBJECT_EXCEPTIONS_H */
<file_sep>/src/modules/DepositionGeant4/GeneratorActionG4.cpp
/**
* @file
* @brief Implements the particle generator
* @remark Based on code from <NAME>
* @copyright MIT License
*/
#include "GeneratorActionG4.hpp"
#include <cmath>
#include <limits>
#include <memory>
#include <G4Event.hh>
#include <G4GeneralParticleSource.hh>
#include <G4ParticleDefinition.hh>
#include <G4ParticleTable.hh>
#include "core/config/exceptions.h"
#include "core/utils/log.h"
#include "tools/geant4.h"
using namespace allpix;
GeneratorActionG4::GeneratorActionG4(const Configuration& config)
: particle_source_(std::make_unique<G4GeneralParticleSource>()) {
// Set verbosity of source to off
particle_source_->SetVerbosity(0);
// Get source specific parameters
auto single_source = particle_source_->GetCurrentSource();
// Find Geant4 particle
G4ParticleDefinition* particle =
G4ParticleTable::GetParticleTable()->FindParticle(config.get<std::string>("particle_type"));
if(particle == nullptr) {
// FIXME more information about available particle
throw InvalidValueError(config, "particle_type", "particle type does not exist");
}
// Set global parameters of the source
// FIXME keep number of particles always at one?
single_source->SetNumberOfParticles(1);
single_source->SetParticleDefinition(particle);
// FIXME What is this time
single_source->SetParticleTime(0.0);
// Set position parameters
single_source->GetPosDist()->SetPosDisType("Beam");
single_source->GetPosDist()->SetBeamSigmaInR(config.get<double>("particle_radius_sigma", 0));
single_source->GetPosDist()->SetCentreCoords(config.get<G4ThreeVector>("particle_position"));
// Set distribution parameters
single_source->GetAngDist()->SetAngDistType("planar");
G4ThreeVector direction = config.get<G4ThreeVector>("particle_direction");
if(std::fabs(direction.mag() - 1.0) > std::numeric_limits<double>::epsilon()) {
LOG(WARNING) << "Momentum direction is not a unit vector: magnitude is ignored";
}
single_source->GetAngDist()->SetParticleMomentumDirection(direction);
// Set energy parameters
single_source->GetEneDist()->SetEnergyDisType("Mono");
single_source->GetEneDist()->SetMonoEnergy(config.get<double>("particle_energy"));
}
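// Illustrative configuration for this generator (the key names are taken from
// the config.get calls above; the values and units are only examples):
//
//   particle_type = "e-"
//   particle_position = 0 0 -10mm
//   particle_direction = 0 0 1
//   particle_energy = 5GeV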
/**
* Called automatically for every event
*/
void GeneratorActionG4::GeneratePrimaries(G4Event* event) {
particle_source_->GeneratePrimaryVertex(event);
}
<file_sep>/src/modules/GeometryBuilderGeant4/README.md
## GeometryBuilderGeant4
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
#### Description
Constructs the Geant4 geometry from the internal geometry. It first constructs the world geometry from the internal world size, with a certain margin, using a particular world material. It then creates all the detectors using their internal detector models.
All the available detector models are fully supported. This builder can create extra support layers of the following materials (note that these should be specified in lowercase):
* silicon
* plexiglass
* kapton
* copper
* epoxy
* carbonfiber
* g10
* solder
#### Parameters
* `world_material` : Material of the world, should either be **air** or **vacuum**. Defaults to **air** if not specified.
* `world_margin_percentage` : Percentage of the world size to add as extra margin on top of the internally calculated minimum world size. Defaults to 0.1, thus 10%.
* `world_minimum_margin` : Minimum absolute margin to add to all sides of the internally calculated minimum world size. Defaults to zero for all axes, thus not adding any minimum margin.
* `GDML_output_file` : Optional file to write the geometry to in GDML format. Can only be used if the installed Geant4 version has GDML support (an error is thrown otherwise). Even then, export may fail due to the incomplete GDML implementation in Geant4.
#### Usage
To create a Geant4 geometry using vacuum as world material and with always exactly one meter added to the minimum world size on every side, the following configuration can be used.
```ini
[GeometryBuilderGeant4]
world_material = "vacuum"
world_margin_percentage = 0
world_minimum_margin = 1m 1m 1m
```
<file_sep>/src/modules/DetectorHistogrammer/DetectorHistogrammerModule.hpp
/**
* @file
* @brief Definition of detector histogramming module
* @copyright MIT License
*/
#ifndef ALLPIX_MODULE_DETECTOR_HISTOGRAMMER_H
#define ALLPIX_MODULE_DETECTOR_HISTOGRAMMER_H
#include <memory>
#include <string>
#include <TH1I.h>
#include <TH2I.h>
#include "core/config/Configuration.hpp"
#include "core/geometry/GeometryManager.hpp"
#include "core/messenger/Messenger.hpp"
#include "core/module/Module.hpp"
#include "objects/PixelHit.hpp"
namespace allpix {
/**
* @ingroup Modules
* @brief Module to plot the final digitized pixel data
*
* Generates a hitmap of all the produced pixel hits, together with a histogram of the cluster size
*/
class DetectorHistogrammerModule : public Module {
public:
/**
* @brief Constructor for this detector-specific module
* @param config Configuration object for this module as retrieved from the steering file
* @param messenger Pointer to the messenger object to allow binding to messages on the bus
* @param detector Pointer to the detector for this module instance
*/
DetectorHistogrammerModule(Configuration, Messenger*, std::shared_ptr<Detector>);
/**
* @brief Initialize the histograms
*/
void init() override;
/**
* @brief Fill the histograms
*/
void run(unsigned int) override;
/**
* @brief Write the histograms to the modules file
*/
void finalize() override;
private:
Configuration config_;
std::shared_ptr<Detector> detector_;
// List of pixel hits
std::shared_ptr<PixelHitMessage> pixels_message_;
// Statistics to compute mean position
ROOT::Math::XYVector total_vector_{};
unsigned long total_hits_{};
// Histograms to output
TH2I* histogram; // FIXME: bad name
TH1I* cluster_size;
};
} // namespace allpix
#endif /* ALLPIX_MODULE_DETECTOR_HISTOGRAMMER_H */
<file_sep>/src/core/config/Configuration.cpp
/**
* @file
* @brief Implementation of configuration
* @copyright MIT License
*/
#include "Configuration.hpp"
#include <cassert>
#include <ostream>
#include <stdexcept>
#include <string>
#include "core/utils/file.h"
#include "exceptions.h"
using namespace allpix;
Configuration::Configuration(std::string name, std::string path) : name_(std::move(name)), path_(std::move(path)) {}
bool Configuration::has(const std::string& key) const {
return config_.find(key) != config_.cend();
}
std::string Configuration::getName() const {
return name_;
}
std::string Configuration::getFilePath() const {
return path_;
}
std::string Configuration::getText(const std::string& key) const {
try {
// NOTE: returning literally including ""
return config_.at(key);
} catch(std::out_of_range& e) {
throw MissingKeyError(key, getName());
}
}
std::string Configuration::getText(const std::string& key, const std::string& def) const {
if(!has(key)) {
return def;
}
return getText(key);
}
/**
* @throws InvalidValueError If the path does not exist while the check_exists parameter is given
*
* For a relative path the absolute path of the configuration file is prepended. Absolute paths are not changed.
*/
// TODO [doc] Document canonicalizing behaviour
std::string Configuration::getPath(const std::string& key, bool check_exists) const {
try {
return path_to_absolute(get<std::string>(key), check_exists);
} catch(std::invalid_argument& e) {
throw InvalidValueError(*this, key, e.what());
}
}
/**
* @throws InvalidValueError If the path does not exist while the check_exists parameter is given
*
* For all relative paths the absolute path of the configuration file is prepended. Absolute paths are not changed.
*/
// TODO [doc] Document canonicalizing behaviour
std::vector<std::string> Configuration::getPathArray(const std::string& key, bool check_exists) const {
std::vector<std::string> path_array = getArray<std::string>(key);
// Convert all paths to absolute
try {
for(auto& path : path_array) {
path = path_to_absolute(path, check_exists);
}
return path_array;
} catch(std::invalid_argument& e) {
throw InvalidValueError(*this, key, e.what());
}
}
/**
* @throws std::invalid_argument If the path does not exist
*/
std::string Configuration::path_to_absolute(std::string path, bool canonicalize_path) const {
// If not an absolute path, make it an absolute path
if(path[0] != '/') {
// Get base directory of config file
std::string directory = path_.substr(0, path_.find_last_of('/'));
// Set new path
path = directory + "/" + path;
// Normalize path only if we have to check if it exists
// NOTE: This throws an error if the path does not exist
if(canonicalize_path) {
path = allpix::get_absolute_path(path);
}
}
return path;
}
void Configuration::setText(const std::string& key, const std::string& val) {
config_[key] = val;
}
/**
* The alias is only used if new key does not exist but old key does
*/
void Configuration::setAlias(const std::string& new_key, const std::string& old_key) {
if(!has(old_key) || has(new_key)) {
return;
}
try {
config_[new_key] = config_.at(old_key);
} catch(std::out_of_range& e) {
throw MissingKeyError(old_key, getName());
}
}
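// Example (illustrative): if the configuration contains old_key = 5 and no
// new_key, then after setAlias("new_key", "old_key") both keys return 5.
// If new_key is already set, or old_key is missing, the call does nothing.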
unsigned int Configuration::countSettings() const {
return static_cast<unsigned int>(config_.size());
}
/**
* All keys that are already defined earlier in this configuration are not changed.
*/
void Configuration::merge(const Configuration& other) {
for(auto config_pair : other.config_) {
// Only merge values that do not yet exist
if(!has(config_pair.first)) {
setText(config_pair.first, config_pair.second);
}
}
}
std::vector<std::pair<std::string, std::string>> Configuration::getAll() {
std::vector<std::pair<std::string, std::string>> result;
// Loop over all configuration keys
for(auto& key_value : config_) {
// Skip internal keys starting with an underscore
if(!key_value.first.empty() && key_value.first[0] == '_') {
continue;
}
result.emplace_back(key_value);
}
return result;
}
<file_sep>/doc/usermanual/chapters/introduction.tex
\section{Introduction}
\label{sec:introduction}
\apsq is a generic simulation framework for silicon tracker and vertex detectors written in modern C++. It is the successor of a previously developed simulation framework called AllPix~\cite{ap1wiki,ap1git}. The goal of the \apsq framework is to provide a complete and easy-to-use package for simulating the performance of detectors from a general source of particles until the digitization of hits in the detector chip.
The framework builds upon other packages to perform tasks in the simulation chain, most notably Geant4~\cite{geant4} for the deposition of charge carriers in the sensor and ROOT~\cite{root} for producing histograms and saving the produced data to storage. The core of the framework focuses on the simulation of charge transport in semiconductor detectors and the digitization to hits in the frontend electronics. The framework does not perform a reconstruction of the particle tracks.
\apsq is designed as a modular framework, allowing for an easy extension to more complex and specialized detector simulations. A modular setup also allows separating the core of the framework from the implementation of the algorithms in the modules, leading to a framework which is both easier to understand and to maintain. Besides modularity, the \apsq framework was designed with the following main design goals in mind (listed from most to least important):
\begin{enumerate}
\item Reflects the physics
\begin{itemize}
\item A run consists of several sequential events. A single event here refers to an independent passage of one or multiple particles through the setup
\item Detectors are treated as separate objects for particles to pass through
\item All of the information must be contained at the very end of processing every single event (sequential events)
\end{itemize}
\item Ease of use (user-friendly)
\begin{itemize}
\item Simple, intuitive configuration and execution ("does what you expect")
\item Clear and extensive logging and error reporting
\item Implementing a new module should be feasible without knowing all details of the framework
\end{itemize}
\item Flexibility
\begin{itemize}
\item Event loop runs sequence of modules, allowing for both simple and advanced user configurations
\item Possibility to run multiple different modules on different detectors
\item Limit flexibility for the sake of simplicity and ease of use
\end{itemize}
\end{enumerate}
\subsection{History}
Development of AllPix (the original version) started around 2012 as a generic simulation framework for pixel detectors. It has been successfully used for simulating a variety of different detector setups through the years. Originally written as a Geant4 user application, the framework has grown `organically' as new features continued to be added. Around 2016, discussions started among the collaborators about rewriting the software from scratch. Primary possibilities for improvement included better modularity, more extensive configuration options and an easier geometry setup.
Early development of \apsq started at the end of 2016, but most of the initial rework in modern C++ was carried out in the framework of a technical student project in the beginning of 2017. At the time of writing, the core of the framework is starting to mature and initial versions of various generic core modules have been created.
\subsection{Scope of this manual}
This document is the primary User's Guide for \apsq. It presents all the necessary requirements to start using the framework. In more detail this manual is designed to:
\begin{itemize}
\item guide all new users through the installation
\item introduce new users to the toolkit for the purpose of running their own simulations
\item explain the structure of the core framework and the components it provides to the modules
\item provide detailed information about all modules and how-to use and configure them
\item describe the required steps for adding a new detector model and implementing a new module
\end{itemize}
In this manual an overview of the framework is given; more detailed information on the code itself can be found in the Doxygen reference manual. The reader does not need any programming experience to get started, but knowledge of (modern) C++ will be useful in the later chapters.
\subsection{Support and reporting issues}
We are happy to receive feedback on any problem that might arise. Reports for issues, questions about unclear parts, as well as suggestions for improvements, are very much appreciated. These should preferably be brought up on the issues page of the repository, which can be found at \url{https://gitlab.cern.ch/simonspa/allpix-squared/issues}.
<file_sep>/src/modules/Dummy/DummyModule.cpp
/**
* @file
* @brief Implementation of [Dummy] module
* @copyright MIT License
*/
#include "DummyModule.hpp"
#include <string>
#include <utility>
#include "core/utils/log.h"
using namespace allpix;
DummyModule::DummyModule(Configuration config, Messenger*, GeometryManager*) : Module(config), config_(std::move(config)) {
// ... Implement ... (Typically binds the required messages and optionally sets configuration defaults)
LOG(TRACE) << "Initializing module " << getUniqueName();
}
void DummyModule::run(unsigned int) {
// ... Implement ... (Typically uses the configuration to execute its function and outputs a message)
LOG(TRACE) << "Running module " << getUniqueName();
}
<file_sep>/doc/usermanual/chapters/installation.tex
\section{Installation}
\label{sec:installation}
After installing and loading the required dependencies, there are various options to customize the installation of \apsq. This chapter contains details on the standard installation process and information about custom installations.
\subsection{Prerequisites}
\label{sec:prerequisites}
\apsq should be able to run without problems on Mac as well as on any recent Linux distribution. Windows is not officially supported and will likely never be. It could, however, be theoretically possible to install \apsq using MinGW or Cygwin, but this has not been tested. The prerequisites can be loaded automatically on CERN's LXPLUS service; please continue to Section \ref{sec:initialize_dependencies} for an installation on LXPLUS.
The core framework is separated from the individual modules and \apsq has therefore only one required dependency: ROOT 6 (versions below 6 are not supported!)~\cite{root}. If the framework is run on a CERN cluster the default dependencies can be loaded from CVMFS as explained in Section \ref{sec:initialize_dependencies}. Otherwise all required dependencies need to be installed before building \apsq. Please refer to \cite{rootinstallation} for instructions on how to install ROOT. ROOT has several extra components and the GenVector package is required to run \apsq. This package is included in the default build.
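A quick way to verify that a suitable ROOT version is available in the current environment is the following command, which is part of any standard ROOT installation and should report a 6.x version:
\begin{verbatim}
$ root-config --version
\end{verbatim}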
For various modules additional dependencies are necessary. For details about the dependencies and their installation visit the module documentation in Section \ref{sec:modules}. The following dependencies are needed to compile the standard installation:
\begin{itemize}
\item Geant4~\cite{geant4}: Simulates the particle beam, depositing charges in the detectors with the help of the constructed geometry. See the instructions in \cite{geant4installation} for details on how to install the software. All the Geant4 datasets are required to run the modules successfully. GDML support can also be enabled to save the Geant4 geometry for later review. Finally, it is recommended to enable Qt visualization. A useful set of CMake flags to build a functional Geant4 package would be:
\begin{verbatim}
-DGEANT4_INSTALL_DATA=ON
-DGEANT4_BUILD_MULTITHREADED=ON
-DGEANT4_USE_GDML=ON
-DGEANT4_USE_QT=ON
-DGEANT4_USE_XM=ON
-DGEANT4_USE_OPENGL_X11=ON
-DGEANT4_USE_SYSTEM_CLHEP=OFF
\end{verbatim}
\item Eigen3~\cite{eigen3}: Vector package to do Runge-Kutta integration in the generic charge propagation module. Eigen is available in almost all Linux distributions through the package manager. Otherwise it can be easily installed, because it is a header-only library.
\end{itemize}
Extra flags need to be set for building an \apsq installation without these dependencies. Details about these configuration options are given in Section \ref{sec:cmake_config}.
\subsection{Downloading the source code}
The latest version of \apsq can be fetched from the Gitlab repository at \url{https://gitlab.cern.ch/simonspa/allpix-squared}. This version is under heavy development, but should work out-of-the-box. The software can be cloned and accessed as follows:
\begin{verbatim}
$ git clone https://gitlab.cern.ch/simonspa/allpix-squared
$ cd allpix-squared
\end{verbatim}
\subsection{Initializing the dependencies}
\label{sec:initialize_dependencies}
Before continuing with the build, the necessary setup scripts for ROOT and Geant4 (unless a build without Geant4 modules is attempted) should be run. In Bash on a private Linux machine this means executing the following two commands from their respective installation directories (replacing \textit{\textless root\_install\_dir\textgreater} with the local ROOT installation directory and similarly for Geant4):
\begin{verbatim}
$ source <root_install_dir>/bin/thisroot.sh
$ source <geant4_install_dir>/bin/geant4.sh
\end{verbatim}
On the CERN LXPLUS service a standard initialization script is available to load all dependencies from the CVMFS infrastructure. This script should be run as follows (from the main repository directory):
\begin{verbatim}
$ source etc/scripts/setup_lxplus.sh
\end{verbatim}
\subsection{Configuration via CMake}
\label{sec:cmake_config}
\apsq uses the CMake build system to build and install the core framework and the modules. An out-of-source build is recommended: this means CMake should not be directly executed in the source folder. Instead a \textit{build} folder should be created inside the source folder from which CMake should be run. For a standard build without any flags this implies executing:
\begin{verbatim}
$ mkdir build
$ cd build
$ cmake ..
\end{verbatim}
CMake can be run with several extra arguments to change the type of installation. These options can be set with -D\textit{option} (see the end of this section for an example). Currently the following options are supported:
\begin{itemize}
\item \textbf{CMAKE\_INSTALL\_PREFIX}: The directory to use as a prefix for installing the binaries, libraries and data. Defaults to the source directory (where the folders \textit{bin/} and \textit{lib/} are added).
\item \textbf{CMAKE\_BUILD\_TYPE}: Type of build to install, defaults to \texttt{RelWithDebInfo} (compiles with optimizations and debug symbols). Other possible options are \texttt{Debug} (for compiling with no optimizations, but with debug symbols and extended tracing using the Clang Address Sanitizer library) and \texttt{Release} (for compiling with full optimizations and no debug symbols).
\item \textbf{MODEL\_DIRECTORY}: Directory to install the internal models to. Defaults to not installing if the \textbf{CMAKE\_INSTALL\_PREFIX} is set to the directory containing the sources (the default). Otherwise the default value is equal to the directory \textbf{CMAKE\_INSTALL\_PREFIX}\-\textit{/share/allpix/}. The install directory is automatically added to the model search path used by the geometry model parsers to find all the detector models.
\item \textbf{BUILD\_\textit{module\_name}}: Whether the specific module \textit{module\_name} should be installed or not. Defaults to ON, thus all modules are installed by default. This set of parameters has to be set appropriately for a build without extra dependencies as specified in Section \ref{sec:prerequisites}.
\item \textbf{BUILD\_ALL\_MODULES}: Build all included modules, defaulting to OFF. This overwrites any selection using the parameters described above.
\end{itemize}
An example of a custom installation with debugging, without the GeometryBuilderGeant4 module and installed to a custom directory, is shown below:
\begin{verbatim}
$ mkdir build
$ cd build
$ cmake -DCMAKE_INSTALL_PREFIX=../install/ \
-DCMAKE_BUILD_TYPE=DEBUG \
-DBUILD_GeometryBuilderGeant4=OFF ..
\end{verbatim}
\subsection{Compilation and installation}
Compiling the framework is now a single command in the build folder created earlier (replacing \textit{\textless number\_of\_cores\textgreater} with the number of cores to use for compilation):
\begin{verbatim}
$ make -j<number_of_cores>
\end{verbatim}
The compiled (non-installed) version of the executable can be found at \textit{src/exec/allpix} in the build folder. Running \apsq directly without installing can be useful for developers. It is not recommended for normal users, because the correct library and model paths are only fully configured during installation.
To install the library to the selected install location (defaulting to the source directory) requires the following command:
\begin{verbatim}
$ make install
\end{verbatim}
The binary is now available as \textit{bin/allpix} in the installation directory. The example configuration files are not installed as they should only be used as a starting point for your own configuration. They can however be used to check if the installation was successful. Running the allpix binary with the example configuration (like \texttt{bin/allpix -c \textit{etc/example.conf}}) should run without problems when a standard installation is used.
<file_sep>/src/modules/GeometryBuilderTGeo/GeometryBuilderTGeoModule.hpp
/// @file
/// @brief Implementation of the TGeo geometry builder
///
/// Builds the detector geometry according to user defined parameters
///
/// To do :
/// - Refer to the detector desc with their names instead of integers
///
/// @date March 30 2017
/// @version 0.13
#ifndef ALLPIX_DETECTOR_CONSTRUCTION_TGEO_H
#define ALLPIX_DETECTOR_CONSTRUCTION_TGEO_H
#include <map>
#include <memory>
#include <Math/Vector3D.h>
#include <TGeoManager.h>
#include "core/config/Configuration.hpp"
#include "core/geometry/GeometryManager.hpp"
#include "core/geometry/HybridPixelDetectorModel.hpp"
#include "core/messenger/Messenger.hpp"
#include "core/module/Module.hpp"
/* Names of detector parts
* These are extremely important and should be placed in a visible way,
* as they will be used to retrieve the objects from the gGeoManager.
*/
const TString WrapperName = "Wrapper";
const TString supportName = "support";
const TString WaferName = "Wafer"; // Box in AllPix1
const TString CoverName = "Coverlayer";
const TString SliceName = "Slice";
const TString PixelName = "Pixel";
const TString ChipName = "Chip";
const TString BumpName = "Bump";
const TString GuardRingsName = "GuardRings";
// FIXME To be placed in a more adequate place
TGeoTranslation ToTGeoTranslation(const ROOT::Math::XYZPoint& pos);
TString Print(TGeoTranslation* trl);
namespace allpix {
/**
* @ingroup Modules
* @brief Module to construct the TGeo from the internal geometry
*/
class GeometryBuilderTGeoModule : public Module {
public:
/**
* @brief Constructs geometry construction module
* @param geo_manager Pointer to the geometry manager, containing the detectors
* @param config Configuration object of the geometry builder module
*/
GeometryBuilderTGeoModule(Configuration config, Messenger*, GeometryManager*);
/**
* @brief Initializes and constructs the TGeo geometry
*/
void init() override;
private:
Configuration m_config;
/**
* @brief Construct the TGeo geometry
*/
void Construct();
/**
* @brief Build all detector devices
*/
void BuildPixelDevices();
/**
* @brief Build all the materials
*/
void BuildMaterialsAndMedia();
/**
* @brief Build optional appliances
*/
void BuildAppliances();
/**
* @brief Build optional test structures
*/
void BuildTestStructure();
// Global internal variables
GeometryManager* m_geoDscMng;
TGeoMedium* m_fillingWorldMaterial;
// User defined parameters
/*
Medium to fill the World. Available media :
- Air
- Vacuum
*/
TString m_userDefinedWorldMaterial;
TString m_userDefinedGeoOutputFile;
bool m_buildAppliancesFlag;
int m_Appliances_type;
bool m_buildTestStructureFlag;
std::map<std::string, ROOT::Math::XYZVector> m_vectorWrapperEnhancement;
std::map<std::string, TGeoTranslation> m_posVectorAppliances; //
};
} // namespace allpix
#endif /* ALLPIX_DETECTOR_CONSTRUCTION_TGEO_H */
<file_sep>/src/objects/MCParticle.hpp
/**
* @file
* @brief Definition of Monte-Carlo particle object
* @copyright MIT License
*/
#ifndef ALLPIX_MC_PARTICLE_H
#define ALLPIX_MC_PARTICLE_H
#include <Math/Point3D.h>
#include "Object.hpp"
namespace allpix {
/**
* @brief Monte-Carlo particle through the sensor
*/
class MCParticle : public Object {
public:
/**
* @brief Construct a Monte-Carlo particle
* @param local_entry_point Entry point of the particle in the sensor in local coordinates
* @param global_entry_point Entry point of the particle in the sensor in global coordinates
* @param local_exit_point Exit point of the particle in the sensor in local coordinates
* @param global_exit_point Exit point of the particle in the sensor in global coordinates
* @param particle_id Identifier for the particle type
*/
MCParticle(ROOT::Math::XYZPoint local_entry_point,
ROOT::Math::XYZPoint global_entry_point,
ROOT::Math::XYZPoint local_exit_point,
ROOT::Math::XYZPoint global_exit_point,
int particle_id);
/**
* @brief Get the entry point of the particle in local coordinates
* @return Particle entry point
*/
ROOT::Math::XYZPoint getLocalEntryPoint() const;
/**
* @brief Get the entry point of the particle in global coordinates
* @return Particle entry point
*/
ROOT::Math::XYZPoint getGlobalEntryPoint() const;
/**
* @brief Get the exit point of the particle in local coordinates
* @return Particle exit point
*/
ROOT::Math::XYZPoint getLocalExitPoint() const;
/**
* @brief Get the exit point of the particle in global coordinates
* @return Particle exit point
*/
ROOT::Math::XYZPoint getGlobalExitPoint() const;
/**
* @brief Get particle identifier
* @return Particle identifier
*/
int getParticleID() const;
/**
* @brief ROOT class definition
*/
ClassDef(MCParticle, 1);
/**
* @brief Default constructor for ROOT I/O
*/
MCParticle() = default;
private:
ROOT::Math::XYZPoint local_entry_point_{};
ROOT::Math::XYZPoint global_entry_point_{};
ROOT::Math::XYZPoint local_exit_point_{};
ROOT::Math::XYZPoint global_exit_point_{};
int particle_id_{};
};
/**
* @brief Typedef for message carrying MC particles
*/
using MCParticleMessage = Message<MCParticle>;
} // namespace allpix
#endif
<file_sep>/src/modules/RCEWriter/README.md
## RCEWriter
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
**Input**: *PixelHitMessage*
#### Description
Reads in the pixel hit messages and saves track data in the RCE format, appropriate for the Proteus telescope reconstruction software. An event tree and one sensor tree per detector are initialized the first time the module runs. The event tree is initialized with the appropriate branches, while a sensor tree is created for each detector and its branches initialized from a struct. For every event, the module loops over all pixel hit messages, and then over all hits in each message, writing the data to the tree branches in the RCE format. If there are no hits, the event is saved with nHits = 0 and the other fields empty.
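As a quick sanity check, the produced file can be opened with ROOT and its contents listed. The following is a minimal sketch, assuming the default output file name:
```cpp
// Minimal sketch (run inside the ROOT interpreter): open the produced file
// and list the event tree and the per-sensor trees it contains.
TFile* file = TFile::Open("rce_data.root", "READ");
file->ls();
```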
#### Parameters
* `file_name` : Name of the data file (without the .root suffix) to create, relative to the output directory of the framework. The default filename is *rce_data.root*
#### Usage
To create the default file (with the name *rce_data.root*) an instantiation without arguments can be placed at the end of the configuration:
```ini
[RCEWriter]
```
<file_sep>/src/objects/PixelCharge.cpp
/**
* @file
* @brief Implementation of object with set of particles at pixel
* @copyright MIT License
*/
#include "PixelCharge.hpp"
#include "exceptions.h"
using namespace allpix;
PixelCharge::PixelCharge(Pixel pixel, unsigned int charge, std::vector<const PropagatedCharge*> propagated_charges)
: pixel_(std::move(pixel)), charge_(charge) {
for(auto& propagated_charge : propagated_charges) {
propagated_charges_.Add(const_cast<PropagatedCharge*>(propagated_charge)); // NOLINT
}
}
Pixel PixelCharge::getPixel() const {
return pixel_;
}
unsigned int PixelCharge::getCharge() const {
return charge_;
}
/**
* @throws MissingReferenceException If the pointed object is not in scope
*
* Objects are stored as TRefArray and can only be accessed if pointed objects are in scope
*/
std::vector<const PropagatedCharge*> PixelCharge::getPropagatedCharges() const {
// FIXME: This is not very efficient unfortunately
std::vector<const PropagatedCharge*> propagated_charges;
for(int i = 0; i < propagated_charges_.GetEntries(); ++i) {
propagated_charges.emplace_back(dynamic_cast<PropagatedCharge*>(propagated_charges_[i]));
if(propagated_charges.back() == nullptr) {
throw MissingReferenceException(typeid(*this), typeid(PropagatedCharge));
}
}
return propagated_charges;
}
ClassImp(PixelCharge)
<file_sep>/src/modules/GeometryBuilderTGeo/CMakeLists.txt
# Define module
ALLPIX_UNIQUE_MODULE(MODULE_NAME)
# Print message if GDML support is not enabled in ROOT
IF(ROOT_gdml_FOUND)
message(STATUS "Geometry export in GDML format is enabled.")
ADD_DEFINITIONS(-DROOT_GDML)
ELSE()
message(STATUS "Geometry export in GDML format is disabled. To enable it, configure and compile ROOT with the option -Dgdml=ON.")
ENDIF()
# Add source files to library
ALLPIX_MODULE_SOURCES(${MODULE_NAME}
GeometryBuilderTGeoModule.cpp
)
# Provide standard install target
ALLPIX_MODULE_INSTALL(${MODULE_NAME})
<file_sep>/src/core/messenger/Message.cpp
/**
* @file
* @brief Implementation of message
*
* @copyright MIT License
*/
#include "Message.hpp"
#include <memory>
#include <utility>
#include "exceptions.h"
using namespace allpix;
BaseMessage::BaseMessage() = default;
BaseMessage::BaseMessage(std::shared_ptr<const Detector> detector) : detector_(std::move(detector)) {}
BaseMessage::~BaseMessage() = default;
std::shared_ptr<const Detector> BaseMessage::getDetector() const {
return detector_;
}
/**
* @throws MessageWithoutObjectException If this method is not overridden
*
* The override method should return the exact same data but then casted to objects or throw the default exception if this is
* not possible.
*/
std::vector<std::reference_wrapper<Object>> BaseMessage::getObjectArray() {
throw MessageWithoutObjectException(typeid(*this));
}
<file_sep>/src/modules/GeometryBuilderTGeo/README.md
## GeometryBuilderTGeo
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: OUTDATED (not supported)
#### Description
Constructs a TGeo representation of the internal geometry. Creates all detector devices and can also add optional appliances and an optional test structure. The code is based on the Geant4 geometry construction in the original AllPix. Only supports hybrid pixel detectors.
#### Parameters
* `world_material` : Material used to represent the world. There are two possible options, either **Vacuum** or **Air**.
* `world_size` : Size of the world (centered at the origin). Defaults to a box of 1x1x1 cubic meter.
* `build_appliances` : Determines if appliances are enabled.
* `appliances_type` : Type of the appliances to be constructed (see source code for options). Only used if *build_appliances* is enabled.
* `build_test_structures` : Determines if the test structure has to be built.
* `output_file` : Optional ROOT file to write the constructed geometry into
* `GDML_output_file` : Optional file to write geometry to in the GDML format. Can only be used if the used ROOT version has GDML support (will throw an error otherwise).
#### Usage
An example that constructs a simple TGeo geometry without appliances or test structures, in a world of 5x5x5 cubic meters:
```ini
[GeometryBuilderTGeo]
world_material = "Air"
world_size = 5m 5m 5m
build_appliances = 0
build_test_structures = 0
```
<file_sep>/src/core/config/ConfigManager.hpp
/**
* @file
* @brief Interface to the main configuration and its normal and special sections
* @copyright MIT License
*/
#ifndef ALLPIX_CONFIG_MANAGER_H
#define ALLPIX_CONFIG_MANAGER_H
#include <set>
#include <string>
#include <vector>
#include "ConfigReader.hpp"
#include "Configuration.hpp"
namespace allpix {
/**
* @ingroup Managers
* @brief Manager responsible for loading and providing access to the main configuration
*
     * The main configuration is the single most important source of configuration. It is split up into:
* - Global headers that are combined into a single global (not module specific) configuration
* - Ignored headers that are not used at all (mainly useful for debugging)
* - All other headers representing all modules that have to be instantiated by the ModuleManager
*
* Configuration sections are always case-sensitive.
*/
class ConfigManager {
public:
/**
* @brief Construct the configuration manager
* @param file_name Path to the main configuration file
*/
explicit ConfigManager(std::string file_name);
/**
* @brief Use default destructor
*/
~ConfigManager() = default;
/// @{
/**
* @brief Copying the manager is not allowed
*/
ConfigManager(const ConfigManager&) = delete;
ConfigManager& operator=(const ConfigManager&) = delete;
/// @}
/// @{
/**
* @brief Use default move behaviour
*/
ConfigManager(ConfigManager&&) noexcept = default;
ConfigManager& operator=(ConfigManager&&) noexcept = default;
/// @}
/**
* @brief Set the name of the global header and add to the global names
* @param name Name of a global header that should be used as the name
*/
// TODO [doc] Should only set the name and do not add it
void setGlobalHeaderName(std::string name);
/**
* @brief Add a global header name
* @param name Name of a global header section
*/
// TODO [doc] Rename to addGlobalHeader
void addGlobalHeaderName(std::string name);
/**
* @brief Get the global configuration
* @return Global configuration
*/
Configuration getGlobalConfiguration();
/**
* @brief Add a header name to fully ignore
* @param name Name of a header to ignore
*/
// TODO [doc] Rename to ignoreHeader
void addIgnoreHeaderName(std::string name);
/**
* @brief Return if section with given name exists
* @param name Name of the section
* @return True if at least one section with that name exists, false otherwise
*/
        bool hasConfiguration(const std::string& name);
/**
* @brief Get all configurations that are not global or ignored
* @return List of all normal configurations
*/
std::vector<Configuration> getConfigurations() const;
private:
std::string file_name_;
ConfigReader reader_;
std::string global_default_name_;
std::set<std::string> global_names_;
std::set<std::string> ignore_names_;
};
} // namespace allpix
#endif /* ALLPIX_CONFIG_MANAGER_H */
<file_sep>/src/objects/PropagatedCharge.hpp
/**
* @file
* @brief Definition of propagated charge object
* @copyright MIT License
*/
#ifndef ALLPIX_PROPAGATED_CHARGE_H
#define ALLPIX_PROPAGATED_CHARGE_H
#include "DepositedCharge.hpp"
#include "SensorCharge.hpp"
namespace allpix {
/**
* @ingroup Objects
* @brief Set of charges propagated through the sensor
*/
class PropagatedCharge : public SensorCharge {
public:
/**
* @brief Construct a set of propagated charges
* @param local_position Local position of the propagated set of charges in the sensor
         * @param global_position Global position of the propagated set of charges in the sensor
         * @param type Type of the charge carrier
* @param charge Total charge propagated
* @param event_time Total time of propagation arrival after event start
* @param deposited_charge Optional pointer to related deposited charge
*/
PropagatedCharge(ROOT::Math::XYZPoint local_position,
ROOT::Math::XYZPoint global_position,
CarrierType type,
unsigned int charge,
double event_time,
const DepositedCharge* deposited_charge = nullptr);
/**
* @brief Get related deposited charge
* @return Pointer to possible deposited charge
*/
const DepositedCharge* getDepositedCharge() const;
/**
* @brief ROOT class definition
*/
ClassDef(PropagatedCharge, 2);
/**
* @brief Default constructor for ROOT I/O
*/
PropagatedCharge() = default;
private:
TRef deposited_charge_;
};
/**
* @brief Typedef for message carrying propagated charges
*/
using PropagatedChargeMessage = Message<PropagatedCharge>;
} // namespace allpix
#endif
<file_sep>/src/core/config/exceptions.cpp
/**
* @file
* @brief Implementation of configuration exceptions
*
* @copyright MIT License
*/
#include "exceptions.h"
#include "Configuration.hpp"
using namespace allpix;
InvalidValueError::InvalidValueError(const Configuration& config, const std::string& key, const std::string& reason) {
std::string section_str = "in section '" + config.getName() + "'";
if(config.getName().empty()) {
section_str = "in empty section";
}
error_message_ = "Value " + config.getText(key) + " of key '" + key + "' " + section_str + " is not valid";
if(!reason.empty()) {
error_message_ += ": " + reason;
}
}
<file_sep>/etc/scripts/make_module.sh
#!/bin/bash
echo -e "\nPreparing code basis for a new module:\n"
# Ask for module name:
read -p "Name of the module? " MODNAME
# Ask for module type:
echo "Type of the module?"
type=0
select yn in "unique" "detector"; do
case $yn in
unique ) type=1; break;;
detector ) type=2; break;;
esac
done
echo "Creating directory and files..."
echo
# Try to find the modules directory:
DIRECTORIES[0]="../src/modules"
DIRECTORIES[1]="src/modules"
MODDIR=""
for DIR in "${DIRECTORIES[@]}"; do
if [ -d "$DIR" ]; then
MODDIR="$DIR"
break
fi
done
# Abort if the modules directory was not found
[ -z "$MODDIR" ] && { echo "Cannot find the src/modules directory, aborting."; exit 1; }
# Create directory
mkdir "$MODDIR/$MODNAME"
# Copy over CMake file and sources from Dummy:
sed -e "s/Dummy/$MODNAME/g" "$MODDIR/Dummy/CMakeLists.txt" > "$MODDIR/$MODNAME/CMakeLists.txt"
# Copy over the README, setting current git username/email as author
# If this fails, use system username and hostname
MYNAME=$(git config user.name)
MYMAIL=$(git config user.email)
if [ -z "$MYNAME" ]; then
MYNAME=$(whoami)
fi
if [ -z "$MYMAIL" ]; then
MYMAIL=$(hostname)
fi
sed -e "s/Dummy/$MODNAME/g" \
-e "s/\*NAME\*/$MYNAME/g" \
-e "s/\*EMAIL\*/$MYMAIL/g" \
-e "s/Functional/Immature/g" \
"$MODDIR/Dummy/README.md" > "$MODDIR/$MODNAME/README.md"
# Copy over source code skeleton:
sed -e "s/Dummy/$MODNAME/g" "$MODDIR/Dummy/DummyModule.hpp" > "$MODDIR/$MODNAME/${MODNAME}Module.hpp"
sed -e "s/Dummy/$MODNAME/g" "$MODDIR/Dummy/DummyModule.cpp" > "$MODDIR/$MODNAME/${MODNAME}Module.cpp"
# Change to detector module type if necessary:
if [ "$type" == 2 ]; then
sed -i -e "s/_UNIQUE_/_DETECTOR_/g" "$MODDIR/$MODNAME/CMakeLists.txt"
sed -i -e "s/ unique / detector-specific /g" \
-e "/param geo/c\ \* \@param detector Pointer to the detector for this module instance" \
-e "s/GeometryManager\* geo\_manager/std::shared\_ptr\<Detector\> detector/g" \
-e "s/GeometryManager/DetectorModel/g" \
"$MODDIR/$MODNAME/${MODNAME}Module.hpp"
sed -i -e "s/GeometryManager\*/std::shared\_ptr\<Detector\> detector/g" \
-e "s/Module(config)/Module\(config\, detector\)/g" \
"$MODDIR/$MODNAME/${MODNAME}Module.cpp"
fi
echo "Name: $MODNAME"
echo "Author: $MYNAME ($MYMAIL)"
echo "Path: $MODDIR/$MODNAME"
echo
echo "Re-run CMake in order to build your new module."
<file_sep>/src/modules/ROOTObjectWriter/README.md
## ROOTObjectWriter
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
**Input**: *all objects in simulation*
#### Description
Reads in all the messages dispatched by the framework that contain AllPix objects (which should normally be all messages). Each of those messages contains a vector of objects, which is converted to a vector of pointers to the object base class. The first time a new type of object is received, a new tree is created with the class name of this object as its name. Then for every combination of detector and message name a new branch is created in this tree. A leaf is automatically created for every member of the object. The vector of objects is then written to the file for every event it is dispatched in (saving an empty vector if that event did not include the specific object).
If the same type of message is dispatched multiple times, the messages are combined and written to the same tree; the information about the separate messages is thus lost. It is also currently not possible to limit the data that is written to the file. If only a subset of the objects is needed, the rest of the data should be discarded afterwards.
#### Parameters
* `file_name` : Name of the data file (without the .root suffix) to create, relative to the output directory of the framework. The default filename is *data.root*.
#### Usage
To create the default file (with the name *data.root*) an instantiation without arguments can be placed at the end of the configuration:
```ini
[ROOTObjectWriter]
```
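To write the data to a differently named file, the `file_name` parameter can be set explicitly (the file name chosen here is only an example):
```ini
[ROOTObjectWriter]
file_name = "run_data"
```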
<file_sep>/src/modules/DepositionGeant4/GeneratorActionG4.hpp
/**
* @file
* @brief Defines the particle generator
* @copyright MIT License
*/
#ifndef ALLPIX_SIMPLE_DEPOSITION_MODULE_GENERATOR_ACTION_H
#define ALLPIX_SIMPLE_DEPOSITION_MODULE_GENERATOR_ACTION_H
#include <memory>
#include <G4GeneralParticleSource.hh>
#include <G4ParticleDefinition.hh>
#include <G4SDManager.hh>
#include <G4ThreeVector.hh>
#include <G4VUserPrimaryGeneratorAction.hh>
#include "core/config/Configuration.hpp"
namespace allpix {
/**
* @brief Generates the particles in every event
*/
class GeneratorActionG4 : public G4VUserPrimaryGeneratorAction {
public:
/**
* @brief Constructs the generator action
* @param config Configuration of the \ref DepositionGeant4Module module
*/
explicit GeneratorActionG4(const Configuration& config);
/**
* @brief Generate the particle for every event
*/
void GeneratePrimaries(G4Event*) override;
private:
std::unique_ptr<G4GeneralParticleSource> particle_source_;
};
} // namespace allpix
#endif /* ALLPIX_SIMPLE_DEPOSITION_MODULE_GENERATOR_ACTION_H */
<file_sep>/src/objects/PixelCharge.hpp
/**
* @file
* @brief Definition of object with set of particles at pixel
* @copyright MIT License
*/
#ifndef ALLPIX_PIXEL_CHARGE_H
#define ALLPIX_PIXEL_CHARGE_H
#include <Math/DisplacementVector2D.h>
#include <TRefArray.h>
#include "Object.hpp"
#include "Pixel.hpp"
#include "PropagatedCharge.hpp"
namespace allpix {
/**
* @ingroup Objects
* @brief Set of charges at a pixel
*/
class PixelCharge : public Object {
public:
/**
* @brief Construct a set of charges at a pixel
* @param pixel Object holding the information of the pixel
* @param charge Amount of charge stored at this pixel
* @param propagated_charges Optional pointer to the related propagated charges
*/
PixelCharge(Pixel pixel,
unsigned int charge,
std::vector<const PropagatedCharge*> propagated_charges = std::vector<const PropagatedCharge*>());
/**
* @brief Get the pixel containing the charges
* @return Pixel indices in the grid
*/
Pixel getPixel() const;
/**
* @brief Get the charge at the pixel
* @return Total charge stored
*/
unsigned int getCharge() const;
/**
* @brief Get related propagated charges
* @return Possible set of pointers to propagated charges
*/
std::vector<const PropagatedCharge*> getPropagatedCharges() const;
/**
* @brief ROOT class definition
*/
ClassDef(PixelCharge, 2);
/**
* @brief Default constructor for ROOT I/O
*/
PixelCharge() = default;
private:
Pixel pixel_;
unsigned int charge_{};
TRefArray propagated_charges_;
};
/**
* @brief Typedef for message carrying pixel charges
*/
using PixelChargeMessage = Message<PixelCharge>;
} // namespace allpix
#endif
<file_sep>/src/modules/SimpleTransfer/README.md
## SimpleTransfer
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
**Input**: PropagatedCharge
**Output**: PixelCharge
#### Description
Combines individual sets of propagated charges together to a set of charges on the sensor pixels. The module does a simple direct mapping to the nearest pixel, ignoring propagated charges that are too far away from the implants or outside the pixel grid. Timing information for the pixel charges is currently not yet produced, but can be fetched from the linked propagated charges.
#### Parameters
* `max_depth_distance` : Maximum distance in the depth direction (normal to the pixel grid) from the implant side for a propagated charge to be taken into account.
#### Usage
For typical simulation purposes a *max_depth_distance* around 10um should be sufficient, leading to the following configuration:
```ini
[SimpleTransfer]
max_depth_distance = 10um
```
<file_sep>/doc/usermanual/chapters/framework.tex
\section{The \apsq Framework}
\label{sec:framework}
The framework is split up into the following four main components that together form \apsq:
\begin{enumerate}
\item \textbf{Core}: The core contains the internal logic to initiate the modules, provide the geometry, facilitate module communication and run the event sequence. The core keeps its dependencies to a minimum (it only relies on ROOT) and remains separated from the other components as far as possible. It is the main component discussed in this section.
\item \textbf{Modules}: A set of methods that execute a part of the simulation chain. These are built as separate libraries, loaded dynamically by the core. The available modules and their parameters are discussed in more detail in Section \ref{sec:modules}.
\item \textbf{Objects}: Objects are the data passed around between modules using the message framework provided by the core. Modules can listen and bind to messages with objects they wish to receive. Messages are identified by the object type they are carrying, but they can also be named to allow redirecting data to specific modules, facilitating more sophisticated simulations. Messages are meant to be read-only and a copy of the data should be made if a module wishes to change the data. All objects are contained in a separate library, automatically linked to every module. More information about the messaging system and the supported objects can be found in Section \ref{sec:objects_messages}.
\item \textbf{Tools}: \apsq provides a set of header-only 'tools' that provide access to common logic shared by various modules. Examples are an Eigen Runge-Kutta solver and a set of template specializations for ROOT and Geant4 configuration. More information about these can be found in Section \ref{sec:additional_tools_resources}. This set of tools is different from the set of core utilities the framework provides by itself, which are part of the core and explained in Section \ref{sec:logging_utilities}.
\end{enumerate}
Finally \apsq provides an executable which instantiates the core, passes the configuration and runs the simulation chain.
In this chapter, first an overview of the architectural setup of the core is given, together with its interaction with the rest of the \apsq framework. Afterwards, the different subcomponents are discussed and explained in more detail. Some C++ code is provided in the text, but readers who are not interested may skip the technical details.
\subsection{Architecture of the Core}
The core is constructed as a light-weight framework that provides various subsystems to the modules. It also contains the part responsible for instantiating and running the modules from the supplied configuration file. The core is structured around five subsystems, of which four are centered around a manager and the fifth contains a set of simple general utilities. The systems provided are:
\begin{enumerate}
\item \textbf{Configuration}: Provides a general configuration object from which data can be retrieved or stored, together with a TOML-like~\cite{tomlgit} file parser to instantiate the configurations. It also provides the general \apsq configuration manager, which gives access to the main configuration file and its sections. It is used by the module manager system to find the required instantiations and access the global configuration. More information is given in Section \ref{sec:config_parameters}.
\item \textbf{Module}: Contains the base class of all the \apsq modules and the manager responsible for loading and running the modules (using the configuration system). This component is discussed in more detail in Section \ref{sec:module_manager}.
\item \textbf{Geometry}: Supplies helpers for the simulation geometry. The manager instantiates all detectors from the detector configuration file. A detector has a certain position and orientation linked to an instantiation of a particular detector model. The detector model contains all parameters describing the geometry of the detector. More details about the geometry and detector models are provided in Section \ref{sec:models_geometry}.
\item \textbf{Messenger}: The messenger is responsible for sending objects from one module to another. The messenger object is passed to every module and can be used to bind to messages to listen for. Messages with objects are also dispatched through the messenger to send data to the modules listening. Please refer to Section \ref{sec:objects_messages} for more details.
\item \textbf{Utilities}: The framework provides a set of simple utilities for logging, file and directory access and unit conversion. An explanation of how to use these utilities can be found in Section \ref{sec:logging_utilities}. A set of C++ exceptions is also provided in the utilities, which are inherited and extended by the other components. Proper use of exceptions, together with logging informational messages and reporting errors, makes the framework easier to use and debug. A few notes about the use and structure of exceptions are given in Section \ref{sec:error_reporting_exceptions}.
\end{enumerate}
\subsection{Configuration and Parameters}
\label{sec:config_parameters}
Modules and the framework are configured through configuration files. An explanation how to use the various configuration files together with several examples are provided in Section \ref{sec:configuration_files}. All configuration files follow the same format, but the way their input is interpreted differs per configuration file.
\subsubsection{File format}
\label{sec:config_file_format}
Throughout the framework a standard format is used for the configuration files, a simplified version of TOML~\cite{tomlgit}. The rules for this format are as follows:
\begin{enumerate}
\item All whitespace at the beginning or end of a line should be stripped by the parser. Empty lines should be ignored.
\item Every non-empty line should start with either \texttt{\#}, \texttt{[} or an alphanumeric character. Every other character should lead to an immediate parse error.
\item If the line starts with \texttt{\#}, it is interpreted as a comment and all other content on the same line is ignored
\item If the line starts with \texttt{[}, the line indicates a section header (also known as configuration header). The line should contain an alphanumeric string indicating the header name followed by \texttt{]} to end the header (a missing \texttt{]} should raise an exception). Multiple section headers with the same name are allowed. All key-value pairs following this section header are part of this section until a new section header is started. After any number of ignored whitespace characters there may be a \texttt{\#} character. If that is the case, the rest of the line is handled as specified in point~3.
\item If the line starts with an alphanumeric character, the line should indicate a key-value pair. The beginning of the line should contain a string of alphabetic characters, numbers and underscores, but it may not start with an underscore. This string indicates the 'key'. After an optional number of ignored whitespace characters, the key should be followed by an \texttt{$=$}. Any text between the \texttt{$=$} and the first \texttt{\#} character not enclosed within a pair of \texttt{"} characters is known as the non-stripped 'value'. Any character from the \texttt{\#} is handled as specified in point 3. If the line does not contain any non-enclosed \texttt{\#} character the value ends at the end of the line instead. The 'value' of the key-value pair is the non-stripped 'value' with all whitespace in front and at the end stripped.
\item The value can either be accessed as a single value or an array. If the value is accessed as an array, the string is split at every whitespace or \texttt{,} character not enclosed in a pair of \texttt{"} characters. All empty entities are not considered. All other entities are treated as single values in the array.
\item All single values are stored as a string containing at least one character. The conversion to the actual type is executed when accessing the value.
\item All key-value pairs defined before the first section header are part of a zero-length empty section header
\end{enumerate}
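The following hypothetical snippet illustrates most of these rules; all section and key names are invented for demonstration purposes only:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{ini}
# a comment line, ignored by the parser
global_key = value # key-value pair in the zero-length empty section
[MySection] # a section header named 'MySection'
string_key = "some text # with an enclosed hash" # quoted values may contain '#'
array_key = 1 2 3, 4 # accessed as an array, this splits into four values
\end{minted}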
\subsubsection{Accessing parameters}
\label{sec:accessing_parameters}
All values are accessed via the configuration object. In the following example the key is a string called \textbf{key}, the object is named \textbf{config} and the type \textbf{TYPE} is a valid C++ type that the value should represent. The values can be accessed via the following methods:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
// Returns true if the key exists and false otherwise
config.has("key")
// Returns the value in the given type, throws an exception if not existing
config.get<TYPE>("key")
// Returns the value in the given type or the provided default value if it does not exist
config.get<TYPE>("key", default_value)
// Returns an array of single values of the given type; throws if the key does not exist
config.getArray<TYPE>("key")
// Returns an absolute (canonical if it should exist) path to a file
config.getPath("key", true /* check if path exists */)
// Return an array of absolute paths
config.getPathArray("key", false /* check if paths exists */)
// Returns the key as literal text including possible quotation marks
config.getText("key")
// Set the value of key to the default value if the key is not defined
config.setDefault("key", default_value)
// Set the value of the key to the defaults array if key is not defined
config.setDefaultArray<TYPE>("key", vector_of_default_values)
\end{minted}
The conversions to the requested type use the \texttt{from\_string} and \texttt{to\_string} methods provided by the string utility library described in Section \ref{sec:string_utilities}. These conversions largely follow the standard C++ parsing, with one important exception. If (and only if) the value is retrieved as any C/C++ string type and the string is fully enclosed by a pair of \texttt{"} characters, they are stripped before returning the value (and strings can thus also be given without quotation marks).
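As an illustration of this exception, assume a hypothetical configuration containing the pair \texttt{message = "Hello World"} (with quotation marks):
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
// Returns the string 'Hello World' with the enclosing quotation marks stripped
std::string value = config.get<std::string>("message");
// Returns the literal text '"Hello World"' including the quotation marks
std::string text = config.getText("message");
\end{minted}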
\subsection{Modules and the Module Manager}
\label{sec:module_manager}
\apsq is a modular framework; the core idea is to separate functionality into various independent modules. The modules are defined in the subdirectory \textit{src/modules/} in the repository. The name of the directory is the unique name of the module. The suggested naming scheme is CamelCase, thus an example module would be \textit{GenericPropagation}. There are two different kinds of modules which can be defined:
\begin{itemize}
\item \textbf{Unique}: Modules for which always a single instance runs irrespective of the number of detectors.
\item \textbf{Detector}: Modules that are specific to a single detector. They are replicated for all required detectors.
\end{itemize}
The type of module determines the kind of constructor used, the internal unique name and the supported configuration parameters. More details about the instantiation logic for the different kinds of modules can be found in Section \ref{sec:module_instantiation}.
\subsubsection{Files of a Module}
\label{sec:module_files}
Every module directory should contain at least the following files (with \texttt{ModuleName} replaced by the name of the module):
\begin{itemize}
\item \textbf{CMakeLists.txt}: The build script to load the dependencies and define the source files of the library.
\item \textbf{README.md}: Full documentation of the module.
\item \textbf{\texttt{ModuleName}Module.hpp}: The header file of the module (note that another name can be used for this source file, but that is deprecated).
\item \textbf{\texttt{ModuleName}Module.cpp}: The implementation file of the module.
\end{itemize}
The files are discussed in more detail below. All modules that are added to the \textit{src/modules/} directory are built automatically by CMake. This also means that every subdirectory in the modules directory should contain a module with a \textit{CMakeLists.txt} to build it.
More information about constructing new modules can be found in Section \ref{sec:building_new_module}.
\paragraph{CMakeLists.txt}
Contains the build description of the module with the following components:
\begin{enumerate}
\item On the first line either ALLPIX\_DETECTOR\_MODULE(MODULE\_NAME) or \\ ALLPIX\_UNIQUE\_MODULE(MODULE\_NAME) depending on the type of the module defined. The internal name of the module is saved to the \$\{MODULE\_NAME\} variable which should be used as argument to the other functions. Another name can be used as well, but below we exclusively use \$\{MODULE\_NAME\}
\item The next lines should contain the logic to load the dependencies of the module (below is an example to load Geant4). Only ROOT is automatically included and linked to the module.
\item A line with ALLPIX\_MODULE\_SOURCES(\$\{MODULE\_NAME\} \texttt{sources}) where \texttt{sources} should be replaced by all the source files of this module
\item Possibly lines to include the directories and link the libraries for all the dependencies loaded earlier as explained in point~2. See below for an example.
\item A line containing ALLPIX\_MODULE\_INSTALL(\$\{MODULE\_NAME\}) to setup the required target for the module to be installed to.
\end{enumerate}
An example of a simple CMakeLists.txt of a module named \texttt{Test} which requires Geant4 is the following
\vspace{5pt}
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{cmake}
# Define module and save name to MODULE_NAME
# Replace by ALLPIX_DETECTOR_MODULE(MODULE_NAME) to define a detector module
ALLPIX_UNIQUE_MODULE(MODULE_NAME)
# Load Geant4
FIND_PACKAGE(Geant4)
IF(NOT Geant4_FOUND)
MESSAGE(FATAL_ERROR "Could not find Geant4, make sure to source the Geant4 environment\n$ source YOUR_GEANT4_DIR/bin/geant4.sh")
ENDIF()
# Add the sources for this module
ALLPIX_MODULE_SOURCES(${MODULE_NAME}
TestModule.cpp
)
# Add Geant4 to the include directories
TARGET_INCLUDE_DIRECTORIES(${MODULE_NAME} SYSTEM PRIVATE ${Geant4_INCLUDE_DIRS})
# Link the Geant4 libraries to the library
TARGET_LINK_LIBRARIES(${MODULE_NAME} ${Geant4_LIBRARIES})
# Provide standard install target
ALLPIX_MODULE_INSTALL(${MODULE_NAME})
\end{minted}
\paragraph{README.md}
The README.md serves as the documentation for the module and should be written in the Markdown format. It is automatically converted to \LaTeX using Pandoc~\cite{pandoc} and included in the documentation in Section \ref{sec:modules}. It is also viewable online at the repository in the subfolder of the module.
The README.md should follow the structure of the DummyModule in \textit{src/modules/Dummy}. The documentation should contain at least the following sections:
\begin{itemize}
\item The H2-size header with the name of the module which contains at least the following required elements: the \textbf{Maintainer} and the \textbf{Status} of the module. If the module is working and recently tested the status of the module should be \textit{Functional}. The maintainer line should contain the name of the module maintainer and its email address between parentheses. An example for a minimal header is therefore
\begin{verbatim}
## ModuleName
Maintainer: Example Author (<EMAIL>)
Status: Functional
\end{verbatim}
Besides this, the header can also contain the \textbf{Input} and \textbf{Output} fields, which list the names of the objects that are respectively read and dispatched by the module.
\item A H4-size section containing a short description of the module, named \textbf{Description}.
\item A H4-size section named \textbf{Parameters} with the parameters of the module. All parameters should be shortly explained in an itemized list with the name of the parameter in an inline code block.
\item A H4-size section with the name \textbf{Usage} which should contain at least one single simple example of a valid configuration that can be used.
\end{itemize}
\paragraph{\texttt{ModuleName}Module.hpp and \texttt{ModuleName}Module.cpp}
All modules should have both a header file and a source file. In the header file the module is defined together with all its methods. Brief Doxygen documentation should be added to explain what every method does. The source file should provide the implementation of every method and also its more detailed Doxygen documentation. No method should be implemented in the header, to keep the interface clean.
\subsubsection{Module structure}
\label{sec:module_structure}
All modules should inherit from the \texttt{Module} base class which can be found in \textit{src/core/module/Module.hpp}. The module base class provides two base constructors, a few convenient methods and several methods to override. Every module should provide a constructor taking a fixed set of arguments defined by the framework. This particular constructor is always called during construction by the module instantiation logic. The arguments for the constructor differ for unique and detector modules. For unique modules the constructor for a \texttt{TestModule} should be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
TestModule(Configuration config, Messenger* messenger, GeometryManager* geo_manager): Module(config) {}
\end{minted}
Note that the configuration object is forwarded to the base module class.
For detector modules the first two arguments are the same, but the last argument is a \texttt{std::shared\_ptr} to the linked detector instead. It should always forward this provided detector to the base class, besides the configuration. Thus a constructor of a detector module should be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
TestModule(Configuration config, Messenger* messenger, std::shared_ptr<Detector> detector): Module(config, detector) {}
\end{minted}
All modules receive the Configuration object holding the config parameters for that specific object, which can be accessed as explained in Section \ref{sec:accessing_parameters}. Furthermore, a pointer to the Messenger is passed which can be used to both bind variables to receive and dispatch messages as explained in Section \ref{sec:objects_messages}. Finally either a pointer to the GeometryManager is passed, which can be used to fetch all detectors, or an instance of the specifically linked detector. The constructor should normally be used to bind the required messages and set configuration defaults. In case of failure an exception can be thrown from the constructor.
In addition to the constructor, every module can override the following methods (a minimal skeleton is sketched after this list):
\begin{itemize}
\item \texttt{init()}: Called after loading and constructing all modules and before starting the event loop. This method can for example be used to initialize histograms.
\item \texttt{run(unsigned int event\_number)}: Called for every event in the simulation run with the event number (starting from one). An exception should be thrown for every serious error, otherwise a warning should be logged.
\item \texttt{finalize()}: Called after processing all events in the run and before destructing the module. Typically used to save the output data (like histograms). Any exceptions should be thrown from here instead of the destructor.
\end{itemize}
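A minimal sketch of a unique module implementing all three methods could look as follows; the class name and the comments are illustrative only:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
class TestModule : public Module {
public:
    TestModule(Configuration config, Messenger* messenger, GeometryManager* geo_manager)
        : Module(config) {
        // Bind messages and set configuration defaults here
    }
    void init() override {
        // Initialize histograms and other objects needed during the run
    }
    void run(unsigned int event_number) override {
        // Process a single event, throwing an exception on serious errors
    }
    void finalize() override {
        // Save output data like histograms and print summary information
    }
};
\end{minted}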
\subsubsection{Module instantiation}
\label{sec:module_instantiation}
The modules are dynamically loaded and instantiated by the Module Manager. Modules are constructed, initialized, executed and finalized in the linear order they are defined in the configuration file. Thus the configuration file should follow the order of the real process. For every non-special section in the main configuration file (see Section \ref{sec:config_parameters} for more details) a corresponding library containing the module is searched for. A module library has the name \textbf{libAllPixModule\texttt{ModuleName}}, reflecting the \texttt{ModuleName} of the defined module. The module search order is as follows:
\begin{enumerate}
\item The modules already loaded before from an earlier section header
\item All directories in the global configuration parameter \textit{library\_directories} in the provided order if this parameter exists
\item The internal library paths of the executable, that should automatically point to the libraries that are built and installed together with the executable. These library paths are stored in RPATH on Linux, see the next point for more information.
\item The other standard locations to search for libraries depending on the operating system. Details about the procedure Linux follows are found in \cite{linuxld}.
\end{enumerate}
If the module is loaded successfully, it is checked whether the module is a unique or a detector module. The instantiation logic determines a unique name and a priority, where a lower number indicates a higher priority, for every instantiation. The name and priority for the instantiation are determined differently for the two types of modules:
\begin{itemize}
\item \textbf{Unique}: Combination of the name of the module and the \textbf{input} and \textbf{output} parameter (both defaulting to an empty string). The priority is always zero.
\item \textbf{Detector}: Combination of the name of the module, the \textbf{input} and \textbf{output} parameter (both defaulting to an empty string) and the name of the detector this module runs on. If the name of the detector is specified directly by the \textbf{name} parameter the priority is zero. If the detector is only matched by the \textbf{type} parameter the priority is one. If neither \textbf{name} nor \textbf{type} is specified and the module is instantiated for all detectors, the priority is two.
\end{itemize}
The instantiation logic only allows a single instance for every unique name. If there are multiple instantiations with the same unique name the instantiation with the highest priority is kept (thus the one with the lowest number). Otherwise if there are multiple instantiations with the same name and the same priority an exception is raised.
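As a hypothetical example of these priority rules, assume a detector named \texttt{dut} of type \texttt{timepix} and a detector module \texttt{ExampleModule} appearing twice in the main configuration:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{ini}
# Matched through the 'type' parameter: priority one
[ExampleModule]
type = "timepix"
# Matched through the 'name' parameter: priority zero
[ExampleModule]
name = "dut"
\end{minted}
Both sections lead to an instantiation with the same unique name for the detector \texttt{dut}, so only the name-matched instantiation with the higher priority (zero) is kept.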
\subsection{Geometry and Detectors}
\label{sec:models_geometry}
Simulations are frequently run on a set of different detectors (such as a beam telescope and a device under test). All these individual detectors together are what \apsq defines as the geometry. Every detector has a set of properties attached to it:
\begin{itemize}
\item A unique \textbf{name} to refer to the detector in the configuration.
\item The \textbf{position} in the world frame. This is the position of the geometric center of the sensitive device (sensor) given in world coordinates as X, Y and Z (note that any additional components like the chip and possible support layers are ignored when determining the geometric center).
\item The \textbf{orientation} given as Euler angles using the extrinsic Z-X-Z convention relative to the world frame (also known as the 1-3-1 or the "x-convention" and the most widely used definition of Euler angles~\cite{eulerangles}).
\item A \textbf{type} of a detector model. The model defines the geometry and parameters of the detector. Multiple detectors can share the same model (and this is in fact very common). Several ready-to-use models are shipped with the framework.
\item An optional \textbf{electric field} in the sensitive device. An electric field can be added to a detector by a special module as shown in Section \ref{sec:module_electric_field}.
\end{itemize}
The detector configuration is provided in the special detector configuration file, which is explained in Section \ref{sec:detector_config}.
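To illustrate the properties listed above, a single detector section in that file could look as follows; all names and values are chosen for illustration only:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{ini}
[dut] # unique name of the detector
type = "timepix" # type of the detector model
position = 0mm 0mm 100mm # position of the sensor center in the world frame
orientation = 0deg 10deg 0deg # extrinsic Z-X-Z Euler angles
\end{minted}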
\subsubsection{Changing and accessing the geometry}
The geometry is needed early on because it determines the number of detector instantiations as explained in Section \ref{sec:module_instantiation}. It is unfortunately not possible to determine the exact time the first detector instantiation is created or the geometry is accessed in any other way. In the framework it is however very important to have a fixed geometry that does not change between modules. To allow special modules to change the geometry before accessing it, the geometry manager uses the concept of lazy geometry closing. This means that the geometry will be closed as soon as a method is called which accesses the geometry (usually the \texttt{getDetectors} method). Detectors and models can only be added before the geometry is closed. At the point of closing, all remaining detector models are loaded and linked to the detectors without models. The procedure of finding the detector models is explained in more detail in Section \ref{sec:detector_models}.
Geometry is typically directly added from the detector configuration file described in Section \ref{sec:detector_config}. The geometry manager parses this file on construction, the detector models are loaded and linked later during geometry closing as described above. It is also possible to add additional models and detectors directly using \texttt{addModel} and \texttt{addDetector} (before the geometry is closed). Furthermore it is possible to add additional points which should be part of the world geometry using \texttt{addPoint}. This can for example be used to add the beam source to the world geometry.
The detectors and models can be accessed by name and type through the geometry manager using respectively \texttt{getDetector} and \texttt{getModel}. Typically however all detectors are fetched at once using the \texttt{getDetectors} method. If the module is a detector-specific module its related Detector can be accessed through the \texttt{getDetector} method of the module base class instead (returns a null pointer for unique modules) as follows:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
void run(unsigned int event_id) {
// Returns the linked detector
std::shared_ptr<Detector> detector = this->getDetector();
}
\end{minted}
\subsubsection{Coordinate systems}
All detectors have a fixed position in the world frame, which has an arbitrary origin. Every detector also has a local coordinate system attached to it. The origin of this local coordinate system does not usually correspond to the geometric center of the sensitive device, which is the center of rotation of the detector in the global frame. Instead, the origin is based on the pixel grid in the sensor and fixed to the center of the first pixel in the grid, which allows for simpler calculations throughout the framework that are also easier to understand.
While the actual origin of the local coordinate system depends on the type of the model, there are fixed rules for the orientation of the coordinate system. The positive z-axis should point in the direction the particle beam is supposed to enter the sensor, perpendicular to the 2D pixel grid. The x-axis should be in the plane that defines the pixel grid. It should be in horizontal direction perpendicular to the direction of the beam, if the sensor is placed unrotated in a horizontal beam. The y-axis should be normal to both the x- and the z-axis in such a way that a right-handed coordinate system is constructed.
\subsubsection{Detector models}
\label{sec:detector_models}
Different types of detector models are already available and shipped with the framework. The configurations for these standard models use the configuration format introduced in Section \ref{sec:config_file_format} and can be found in the \textit{models} directory of the repository. Every model extends the \texttt{DetectorModel} base class, which defines the minimum parameters of a detector model in the framework:
\begin{itemize}
\item The coordinate of the center in the local frame. This is the local point that is placed at the specified position in the global frame, and the rotation center for the specified orientation.
\item The number of pixels in the sensor in both the x- and y-axis. Every pixel is an independent block replicated over the x,y-plane of the sensor.
\item The size of an individual pixel. The multiplication of the pixel size and the number of pixels is known as the pixel grid and goes over the full x,y-plane.
\item The sensor with a center and a size. The sensor is at least as large as the pixel grid size and has a certain thickness. It can have excess length in the x,y-plane in each direction.
\item The readout chip with a center and a size. It is positioned directly after the sensor by default. The chip can also have an excess as described above for the sensor.
\item Possible support layers with a center and a size. It is positioned directly after the sensor and the chip by default. The support layer can be of various materials and possibly contain a cutout.
\item Total size of the box with the local frame center in the middle that fit all elements of the model.
\end{itemize}
This standard detector model can be extended to provide a more detailed geometry if required by particular modules (most notably the Geant4 geometry builder). The position and size of all elements can be changed by these extending models. A model with only the standard elements described above is the \texttt{MonolithicPixelDetectorModel}. Currently the only extended detector model is the \texttt{HybridPixelDetectorModel}, which also includes bump bonds between the sensor and the readout chip.
\nlparagraph{Detector model parameters}
\todo{This section should likely be moved to a separate chapter, but before that it should also be moved out of the core likely?}
Models are defined in configuration files which are used to instantiate the actual model classes. These files can contain various types of parameters: some are required for all models, others are optional for all models, and some are only supported by certain types of models. For more details about adding and using your own new model, Section \ref{sec:adding_detector_model} should be consulted.
The set of base parameters supported by every model is provided below. These parameters should be given at the top of the file before opening any sections.
\begin{itemize}
\item \texttt{type}: A required parameter describing the type of the model. At the moment either \textbf{monolithic} or \textbf{hybrid}. This value determines any optional extra supported parameters discussed later.
\item \texttt{number\_of\_pixels}: The number of pixels in the 2D pixel grid. Determines the base size of the sensor together with the \texttt{pixel\_size} parameter below.
\item \texttt{pixel\_size}: The size of a single pixel in the pixel grid. Given in 2D as pixels do not have any direct thickness. This parameter is required for all models.
\item \texttt{sensor\_thickness}: Thickness of the active area of the detector model containing the individual pixels. This parameter is required for all models.
\item \texttt{sensor\_excess}: Fallback for the excess width of the sensor in all four directions (top, bottom, left and right). Used if the specialized parameters described below are not given. Defaults to zero, thus having a sensor size equal to the number of pixels times the size of a single pixel.
\item \texttt{sensor\_excess\_\textit{direction}}: With direction either \textit{top}, \textit{bottom}, \textit{left} or \textit{right}, where the top, bottom, right and left direction are respectively the positive y-axis, the negative y-axis, the positive x-axis and the negative x-axis. It specifies the extra excess length added to the sensor in the specific direction.
\item \texttt{chip\_thickness}: Thickness of the readout chip, placed next to the sensor.
\item \texttt{chip\_excess}: Fallback for the excess width of the chip, defaults to zero thus a chip equal to the size of the pixel grid. See the \texttt{sensor\_excess} parameter above.
\item \texttt{chip\_excess\_\textit{direction}}: With direction either \textit{top}, \textit{bottom}, \textit{left} or \textit{right}. The chip excess in the specific direction, see the \texttt{sensor\_excess\_\textit{direction}} parameter above.
\end{itemize}
Besides these base parameters, several layers of support can be added to detector models. Every support layer should be given in its own section named \texttt{support}. By default there are no support layers. Each support layer accepts the following parameters.
\begin{itemize}
\item \texttt{size}: Size of the support in 2D (the thickness is given separately below). This parameter is required for all support layers.
\item \texttt{thickness}: Thickness of the support layers. This parameter is required for all support layers.
\item \texttt{offset}: Every support layer is by default centered to the center of the pixel grid (the rotation center of the model). An optional 2D offset can be specified using this parameter; by default no offset is applied. The position along the depth axis can be specified separately with the parameter \texttt{location}.
\item \texttt{location}: Location of the support layer. Either \textit{sensor} to stick it to the sensor (on the opposite side of the chip) or \textit{chip} to add the support layer after the chip. Defaults to \textit{chip} if not given. Support layers are stacked in their respective direction if multiple layers are given.
\item \texttt{hole\_size}: Adds an optional cut-out hole to the support with the 2D size provided. The hole always covers the full support thickness. No hole will be added if this parameter is not given.
\item \texttt{hole\_offset}: The hole is added by default to the center of the support layer. A 2D offset from this default center can be specified using this parameter.
\item \texttt{material}: Material of the support to use, given as a lowercase string. There is no default set of materials and support for certain types of materials is up to the modules. Refer to Section \ref{sec:modules} for details about the materials supported by the geometry creator module. \todo{This should be standardized...}
\end{itemize}
The base parameters are the only set of parameters supported by the \textbf{monolithic} model. The \textbf{hybrid} model adds bump bonds between the chip and the sensor, while automatically making sure the chip and support layers are shifted appropriately. The extra parameters for the \textbf{hybrid} model are the following (these should be put in the empty start section):
\begin{itemize}
\item \texttt{bump\_height}: Height of the bump bonds (the separation distance between the chip and the sensor)
\item \texttt{bump\_sphere\_radius}: The individual bump bonds are simulated as union solids of a sphere and a cylinder. This parameter sets the radius of the sphere to use, which should generally be smaller than the height of the bump.
\item \texttt{bump\_cylinder\_radius}: The radius of the cylinder part of the bump. The height of the cylinder is determined by the \texttt{bump\_height} parameter.
\item \texttt{bump\_offset}: A 2D offset of the grid of bumps. The individual bumps are by default positioned at the center of all the single pixels in the grid.
\end{itemize}
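Combining the parameters above, a hypothetical model file for a \textbf{hybrid} detector could look as follows; all values are chosen for illustration only:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{ini}
type = "hybrid"
number_of_pixels = 256 256
pixel_size = 55um 55um
sensor_thickness = 300um
sensor_excess = 1mm
chip_thickness = 700um
bump_height = 20um
bump_sphere_radius = 9um
bump_cylinder_radius = 7um
[support]
size = 20mm 20mm
thickness = 1.6mm
material = "epoxy"
\end{minted}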
\nlparagraph{Fetching specific models within the framework}
Some modules are specific to a particular type of detector model. To fetch a specific detector model from the base class, the model should be downcast. An example that tries to fetch a \texttt{HybridPixelDetectorModel} is the following (the downcast returns a null pointer if the class is not of the appropriate type).
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
// Detector is a pointer to a Detector object
auto model = detector->getModel();
auto hybrid_model = std::dynamic_pointer_cast<HybridPixelDetectorModel>(model);
if(hybrid_model != nullptr) {
// The model of this Detector is a HybridPixelDetectorModel
}
\end{minted}
\nlparagraph{Specializing detector models}
A detector model contains default values for all of its parameters. Some parameters, like the sensor thickness, can however vary between different detectors of the same general model. To allow for easy adjustment of these parameters, models can be specialized in the detector configuration file introduced in Section \ref{sec:detector_config}. All of the model parameters in the header at the top (thus not the support layers), except the type parameter, can be changed by adding a parameter with the exact same key and the specialized value to the corresponding detector section. The framework then internally creates a copy of this model with the requested change.
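For instance, to reduce the sensor thickness of a single detector with respect to its model, the key can simply be repeated in the corresponding detector section (values illustrative):
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{ini}
[dut]
type = "timepix"
position = 0 0 0
orientation = 0 0 0
# Specialized parameter overriding the value from the model
sensor_thickness = 150um
\end{minted}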
\nlparagraph{Search order for models}
To support different detector models and storage locations, the framework provides model readers. The core geometry manager also reads models: before the geometry is closed, it loads all remaining models that have not been parsed earlier. The model readers and the core geometry manager search for model files in the following order.
\begin{enumerate}
\item If defined, the paths in the \textit{models\_path} parameter provided to the model reader module or the global \textit{models\_path} parameter if no module-specific one is defined (the geometry manager only uses the global one). Files are read and parsed directly. If the path is a directory, all files in the directory are added (not recursing into subdirectories).
\item The location where the models are installed to (see the MODEL\_DIRECTORY variable in Section \ref{sec:cmake_config}).
\item The standard data paths on the system as given by the environmental variable \$XDG\_DATA\_DIRS with the \project-directory appended. The \$XDG\_DATA\_DIRS variable defaults to \textit{/usr/local/share/} (thus effectively \textit{/usr/local/share/\project}) followed by \textit{/usr/share/} (effectively \textit{/usr/share/\project}).
\end{enumerate}
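As an example of the first option, an additional search path can be provided through the global \textit{models\_path} parameter; this sketch assumes it is placed in the global section at the top of the main configuration file, and the path is purely illustrative:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{ini}
# Global parameter before any section header
models_path = "my_models/"
\end{minted}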
For almost all purposes a specific model reader is not needed and all internal models can be read by the geometry manager.
\todo{This should include more details about other model readers or it should be removed}
\subsection{Passing Objects using Messages}
\label{sec:objects_messages}
Communication between modules happens through messages (only some internal information is shared through external detector objects and the dependencies like Geant4). Messages are templated instantiations of the \texttt{Message} class carrying a vector of objects. The list of objects available in the \apsq objects library are discussed in Section \ref{sec:objects}. The messaging system has a dispatching part to send messages and a receiving part that fetches messages.
The dispatching module can specify an optional name for the messages, but modules should normally not specify this name directly. If the name is not directly given (or equal to \texttt{-}) the \textbf{output} parameter of the module is used to determine the name of the message, defaulting to an empty string. Dispatching messages to their receivers then follows these rules:
\begin{enumerate}
\item The receiving module will \underline{only} receive a message if it has the exact same type as the message dispatched (thus carrying the exact same object). If the receiver is however listening to the \texttt{BaseMessage} type it will receive all dispatched messages instead.
\item The receiving module will \underline{only} receive messages with the exact same name as it is listening for. The module uses the \textbf{input} parameter to determine to which message names the module should listen. If the \textbf{input} parameter is equal to \texttt{*} the module should listen to all messages. Every module listens by default to messages with no name specified (thus receiving the messages of default dispatching modules).
\item If the receiving module is a detector module it will \underline{only} receive messages that are bound to that specific detector \underline{or} messages that are not bound to any detector.
\end{enumerate}
An example of how to dispatch, in the \texttt{run} function of a module, a message containing an array of \texttt{Object} types bound to a detector named \texttt{dut} is provided here:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
void run(unsigned int event_id) {
std::vector<Object> data;
// .. fill the data vector with objects ...
// The message is dispatched only for 'dut' detector
std::shared_ptr<Message<Object>> message = std::make_shared<Message<Object>>(data, "dut");
// Send the message using the Messenger object
messenger->dispatchMessage(message);
}
\end{minted}
\subsubsection{Methods to process messages}
The message system has multiple methods to process received messages. The first two are the most common methods and the third should only be used if necessary. The options are:
\begin{enumerate}
\item Bind a \textbf{single message} to a variable. This should usually be the preferred method as most modules only expect one message to arrive per event (as a module should typically send only one message containing the list of all the objects it should send). An example of how to bind a message containing an array of \textbf{Object} types in the constructor of a detector \texttt{TestModule} would be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
TestModule(Configuration, Messenger* messenger, std::shared_ptr<Detector>) {
messenger->bindSingle(this,
/* Pointer to the message variable */
&TestModule::message,
/* No special messenger flags */
MsgFlags::NONE);
}
std::shared_ptr<Message<Object>> message;
\end{minted}
\item Bind a \textbf{set of messages} to a vector variable. This method should be used if the module can (and expects to) receive the same message multiple times (possibly because it wants to receive the same type of message for all detectors). An example of binding multiple messages containing an array of \textbf{Object} types in the constructor of a detector \texttt{TestModule} would be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
TestModule(Configuration, Messenger* messenger, std::shared_ptr<Detector>) {
messenger->bindMulti(this,
/* Pointer to the message vector */
&TestModule::messages,
/* No special messenger flags */
MsgFlags::NONE);
}
std::vector<std::shared_ptr<Message<Object>>> messages;
\end{minted}
\item Listen to a particular message type and execute a \textbf{listener function} as soon as an object is received. This can be used for more advanced strategies for fetching messages. Note that this method can lead to surprising behaviour, because the listener function is executed during the run of the dispatching module (leading, at the very least, to log messages with incorrect section headers). The listening module should \underline{not} do any heavy work in the listener function, as this is supposed to take place in its \texttt{run} method instead. An example of using this to listen to a message containing an array of \texttt{Object} types in a detector \texttt{TestModule} would be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
TestModule(Configuration, Messenger* messenger, std::shared_ptr<Detector>) {
messenger->registerListener(this,
/* Pointer to the listener method */
&TestModule::listener,
/* No special message flags */
MsgFlags::NONE);
}
void listener(std::shared_ptr<Message<Object>> message) {
// Do something with received message ...
}
\end{minted}
\end{enumerate}
\subsubsection{Message flags}
Various flags can be added to the bind and listener functions. The flags enable a particular behaviour of the framework (if the particular type of method supports the flag); an example of combining multiple flags is given after the list.
\begin{itemize}
\item \textbf{REQUIRED}: Specify that this message is required to be received. If the particular type of message is not received before it is time to execute the run function, the run is automatically skipped by the framework. This can be used to ignore modules that cannot do any action without received messages, for example propagation without any deposited charges.
\item \textbf{NO\_RESET}: Messages are by default automatically reset after the \texttt{run} function executes, to prevent older messages from previous events from appearing again. This behaviour can be disabled by setting this flag (this does not have any effect for listener functions). Setting this flag for single bound messages (without ALLOW\_OVERWRITE) causes an exception to be raised if the message is overwritten in a later event.
\item \textbf{ALLOW\_OVERWRITE}: By default an exception is automatically raised if a single bound message is overwritten (thus setting it multiple times instead of once). This flag prevents this behaviour. It is only used for variables to a single message.
\item \textbf{IGNORE\_NAME}: If this flag is specified, the name of the dispatched message is not considered. Thus the \textbf{input} parameter is ignored and forced to the value \texttt{*}.
\end{itemize}
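Assuming the \texttt{MsgFlags} type supports combining values with the bitwise OR operator (as is common for flag enumerations), multiple behaviours can be requested at once. A minimal sketch binding a single message that is required but may be overwritten would be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2]{c++}
TestModule(Configuration, Messenger* messenger, std::shared_ptr<Detector>) {
    messenger->bindSingle(this,
                          /* Pointer to the message variable */
                          &TestModule::message,
                          /* Message is required, but may be overwritten */
                          MsgFlags::REQUIRED | MsgFlags::ALLOW_OVERWRITE);
}
std::shared_ptr<Message<Object>> message;
\end{minted}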
\subsection{Logging and other Utilities}
\label{sec:logging_utilities}
The \apsq framework provides a set of utilities that can be attributed to two types:
\begin{itemize}
\item Two utilities to improve the usability of the framework. One of these is a flexible and easy-to-use logging system, introduced below in Section \ref{sec:logger}. The other is an easy-to-use framework for units that supports converting arbitrary combinations of units to an independent number which can be used transparently throughout the framework. It is discussed in more detail in Section \ref{sec:unit_system}.
\item A few utilities to extend the functionality provided by the C++ Standard Template Library (STL). These add functionality the C++14 standard lacks (such as filesystem support). The utilities are used internally in the framework and are only briefly discussed here. The utilities falling in this category are the filesystem functions (see Section \ref{sec:filesystem}) and the string utilities (see Section \ref{sec:string_utilities}).
\end{itemize}
\subsubsection{Logging system}
\label{sec:logger}
The logging system is built to handle input/output in the same way as \texttt{std::cin} and \texttt{std::cout}. This approach is both flexible and easy to read. The system is globally configured, thus only one logger exists and there are no special local versions. To send a message to the logging system at a level of \textbf{LEVEL}, the following can be used:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
LOG(LEVEL) << "this is an example message with an integer and a double " << 1 << 2.0;
\end{minted}
A newline is added at the end of every log message. Multi-line log messages can also be used: the logging system will automatically align every new line under the previous message and will leave the header space empty on the new lines.
The system also allows producing a message which is updated on the same line, for simple progress-bar-like functionality. It is enabled using the \texttt{LOG\_PROCESS(LEVEL, IDENTIFIER)} macro (where \texttt{IDENTIFIER} is a special string determining whether the output should be written to the same line or not). If the output is a terminal screen the logging output will be colored to improve readability. This is disabled automatically for all devices that are not terminals.
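A minimal sketch of its use is shown below; the surrounding event loop and the identifier string are illustrative assumptions:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
for(unsigned int i = 1; i <= number_of_events; ++i) {
    // Messages sharing the same identifier overwrite the previous line
    LOG_PROCESS(STATUS, "EVENT_LOOP") << "Running event " << i << " of " << number_of_events;
}
\end{minted}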
More details about the various logging levels can be found in Section \ref{sec:logging_verbosity}.
\subsubsection{Unit system}
\label{sec:unit_system}
Correctly handling units and conversions is of paramount importance. Having a separate C++ type for every different kind of unit would however be too cumbersome for a lot of operations. Therefore units are stored in standard C++ floating point types, in a default unit which all code in the framework uses for calculations. In configuration files, as well as for logging, it is however very useful to provide quantities in a different unit.
The unit system allows adding, retrieving, converting and displaying units. It is a global system transparently used throughout the framework. Examples of using the unit system are given below:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
// Define the standard length unit and an auxiliary unit
Units::add("mm", 1);
Units::add("m", 1e3);
// Define the standard time unit
Units::add("ns", 1);
// Get the units given in m/ns in the defined framework unit mm/ns
Units::get(1, "m/ns");
// Get the framework unit of mm/ns in m/ns
Units::convert(1, "m/ns");
// Give the unit in the best type (lowest number above one) as string
// input is default unit 2000mm/ns and 'best' output is 2m/ns (string)
Units::display(2e3, {"mm/ns", "m/ns"});
\end{minted}
More details about how the unit system is used within \apsq can be found in Section \ref{sec:config_values}.
\subsubsection{Internal utilities}
\paragraph{Filesystem}
\label{sec:filesystem}
Provides functions to convert relative paths to absolute canonical paths, to iterate through all files in a directory and to create new directories. These functions should be replaced by the C++17 filesystem API~\cite{cppfilesystem} as soon as the framework minimum standard is updated to C++17.
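A brief sketch of how such helpers are typically used is given below; the function names are illustrative assumptions and the actual signatures should be checked in the \textit{core/utils} headers:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
// Illustrative helper names, assumed to live in the allpix namespace
// Convert a relative path to an absolute canonical path
std::string absolute_path = allpix::get_canonical_path("models/test.conf");
// Create a directory (including missing parents) for output files
allpix::create_directories("output/plots");
\end{minted}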
\paragraph{String utilities}
\label{sec:string_utilities}
The STL only provides string conversions for standard types using \texttt{std::stringstream} and \texttt{std::to\_string}. It does not allow parsing strings encapsulated in pairs of \texttt{"} characters, nor does it allow integrating different units. Furthermore it does not provide much flexibility to add custom conversions for other external types. The \apsq \texttt{to\_string} and \texttt{from\_string} do allow for these flexible conversions, and they are extensively used in the configuration system. Conversions of numeric types with a unit attached are automatically resolved using the unit system discussed in Section \ref{sec:unit_system}. The \apsq tools system contains extensions to allow automatic conversions for ROOT and Geant4 types, as explained in Section \ref{sec:root_and_geant4_utilities}. The string utilities also include trim and split functions, as these are missing in the STL.
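A short sketch of these conversions is shown below (assuming the functions live in the \texttt{allpix} namespace; the unit resolution follows the behaviour described above):
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
// Parse a string with a unit into a double in framework units
// (with mm as the default length unit, "5um" yields 0.005)
double length = allpix::from_string<double>("5um");
// Convert a value back to its string representation
std::string text = allpix::to_string(length);
\end{minted}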
\subsection{Error Reporting and Exceptions}
\label{sec:error_reporting_exceptions}
\apsq generally follows the principle of throwing exceptions in all cases where something is definitely wrong; it never tries to circumvent problems. Error codes are not supposed to be returned either: only exceptions should be used to report fatal errors. Exceptions are also thrown to signal errors in the user configuration. The advantage of this approach is that configuration and code are more likely to do what they are supposed to do.
For warnings and informational messages the logging system should be used extensively. This helps both in following the progress of the simulation and in debugging problems. Care should however be taken to limit the amount of messages outside of the \texttt{DEBUG} and \texttt{TRACE} levels. More details about the log levels and their usage are given in Section \ref{sec:logging_verbosity}.
The base exceptions in \apsq are available in the utilities. The most important exception base classes are the following:
\begin{itemize}
\item \textbf{ConfigurationError}: All errors related to incorrect user configuration. Could be a non-existing configuration file, a missing key or an invalid parameter value.
\item \textbf{RuntimeError}: All other errors arising at run-time. Could be related to incorrect configuration if messages are not correctly passed or non-existing detectors are specified. Could also be raised if errors arise while loading a library or running a module.
\item \textbf{LogicError}: Problems related to modules that do not properly follow the specifications, for example if a detector module fails to pass the detector to the constructor. These errors should never be raised for a well-behaving module and should therefore not be triggerable by users. Reporting these types of errors can help developers during the development of new modules.
\end{itemize}
Outside of the core framework, exceptions can also be used directly by the modules. There are only two exceptions which should be used by typical modules to indicate errors:
\begin{itemize}
\item \textbf{InvalidValueError}: Available under the subset of configuration exceptions. Signals any problem with the value of a configuration parameter that is not related to either the parsing or the conversion to the required type. Can for example be used for parameters where the possible valid values are limited, like the set of logging levels, or for paths that do not exist. An example is shown below:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
void run(unsigned int event_id) {
// Fetch a key from the configuration
std::string value = config.get<std::string>("key");
// Check if it is a 'valid' value
if(value != "A" && value != "B") {
// Raise an error if the value is not valid
// provide configuration object, key and an explanation
throw InvalidValueError(config, "key", "A and B are the only allowed values");
}
}
\end{minted}
\item \textbf{ModuleError}: Available under the subset of module exceptions. Should be used to indicate any runtime error in a module that is not directly caused by an invalid configuration value, for example if it is not possible to write an output file. A reason should be given to indicate what the problem is (a sketch is given after this list). \todo{The module class should be passed as well, so the module name can be displayed in the error message}
\end{itemize}
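A minimal sketch of raising this exception from a module is given below; the output file is purely illustrative:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
void init() {
    // Attempt to open an (illustrative) output file, requires <fstream>
    std::ofstream output_file("output.txt");
    if(!output_file.good()) {
        // Raise a module error with a reason describing the problem
        throw ModuleError("Cannot open output file for writing");
    }
}
\end{minted}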
\todo{add more info about error reporting style?}
<file_sep>/src/modules/DetectorHistogrammer/README.md
## DetectorHistogrammer
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional (candidate for removal)
**Input**: PixelHit
#### Description
Creates a hitmap of all the pixels in the pixel grid, displaying the number of times a pixel has been hit during the simulation run. Also creates a histogram of the cluster size for every event. Should only be used for quick inspection and checks. For more sophisticated analyses the output from one of the output writers should be used to produce the necessary information.
#### Parameters
*No parameters*
#### Usage
This module is normally bound to a specific detector to plot, for example to the 'dut':
```ini
[DetectorHistogrammer]
name = "dut"
```
<file_sep>/src/modules/LCIOWriter/README.md
## LCIOWriter
**Maintainer**: <NAME> (<EMAIL>)
**Status**: Functional
**Input**: *PixelHitMessage*
#### Description
Writes pixel hit data to an LCIO file, compatible with the EUTelescope analysis framework.
#### Parameters
* `file_name`: LCIO file to write. Extension .slcio
* `pixel_type`: EUTelescope pixel type to create. Options: EUTelSimpleSparsePixelDefault = 1, EUTelGenericSparsePixel = 2, EUTelTimepix3SparsePixel = 5 (Default: EUTelGenericSparsePixel)
* `detector_name`: Detector name written to the run header. Default: "EUTelescope"
* `output_collection_name`: Name of the LCIO collection containing the pixel data. Default: "zsdata_m26"
#### Usage
```ini
[LCIOWriter]
```
<file_sep>/src/modules/DetectorHistogrammer/DetectorHistogrammerModule.cpp
/**
* @file
* @brief Implementation of detector histogramming module
* @copyright MIT License
*/
#include "DetectorHistogrammerModule.hpp"
#include <memory>
#include <string>
#include <utility>
#include "core/geometry/HybridPixelDetectorModel.hpp"
#include "core/messenger/Messenger.hpp"
#include "core/utils/log.h"
#include "tools/ROOT.h"
using namespace allpix;
DetectorHistogrammerModule::DetectorHistogrammerModule(Configuration config,
Messenger* messenger,
std::shared_ptr<Detector> detector)
: Module(config, detector), config_(std::move(config)), detector_(std::move(detector)), pixels_message_(nullptr) {
// Bind pixel hits message
messenger->bindSingle(this, &DetectorHistogrammerModule::pixels_message_, MsgFlags::REQUIRED);
}
void DetectorHistogrammerModule::init() {
// Fetch detector model
auto model = detector_->getModel();
// Create histogram of hitmap
LOG(TRACE) << "Creating histograms";
std::string histogram_name = "histogram";
std::string histogram_title = "Hitmap for " + detector_->getName() + ";x (pixels);y (pixels)";
histogram = new TH2I(histogram_name.c_str(),
histogram_title.c_str(),
model->getNPixels().x(),
-0.5,
model->getNPixels().x() - 0.5,
model->getNPixels().y(),
-0.5,
model->getNPixels().y() - 0.5);
// Create cluster size plot
std::string cluster_size_name = "cluster";
std::string cluster_size_title = "Cluster size for " + detector_->getName() + ";size;number";
cluster_size = new TH1I(cluster_size_name.c_str(),
cluster_size_title.c_str(),
model->getNPixels().x() * model->getNPixels().y(),
0.5,
model->getNPixels().x() * model->getNPixels().y() + 0.5);
}
void DetectorHistogrammerModule::run(unsigned int) {
LOG(DEBUG) << "Adding hits in " << pixels_message_->getData().size() << " pixels";
// Fill 2D hitmap histogram
for(auto& pixel_charge : pixels_message_->getData()) {
auto pixel_idx = pixel_charge.getPixel().getIndex();
// Add pixel
histogram->Fill(pixel_idx.x(), pixel_idx.y());
// Update statistics
total_vector_ += pixel_idx;
total_hits_ += 1;
}
// Fill cluster histogram
cluster_size->Fill(static_cast<double>(pixels_message_->getData().size()));
}
void DetectorHistogrammerModule::finalize() {
// Print statistics
if(total_hits_ != 0) {
LOG(INFO) << "Plotted " << total_hits_ << " hits in total, mean position is "
<< total_vector_ / static_cast<double>(total_hits_);
} else {
LOG(WARNING) << "No hits plotted";
}
// FIXME Set more useful spacing maximum for cluster size histogram
auto xmax = std::ceil(cluster_size->GetBinCenter(cluster_size->FindLastBinAbove()) + 1);
cluster_size->GetXaxis()->SetRangeUser(0, xmax);
// Set cluster size axis spacing
if(static_cast<int>(xmax) < 10) {
cluster_size->GetXaxis()->SetNdivisions(static_cast<int>(xmax) + 1, 0, 0, true);
}
// Set default drawing option histogram for hitmap
histogram->SetOption("colz");
// Set histogram axis spacing
if(static_cast<int>(histogram->GetXaxis()->GetXmax()) < 10) {
histogram->GetXaxis()->SetNdivisions(static_cast<int>(histogram->GetXaxis()->GetXmax()) + 1, 0, 0, true);
}
if(static_cast<int>(histogram->GetYaxis()->GetXmax()) < 10) {
histogram->GetYaxis()->SetNdivisions(static_cast<int>(histogram->GetYaxis()->GetXmax()) + 1, 0, 0, true);
}
// Write histograms
LOG(TRACE) << "Writing histograms to file";
histogram->Write();
cluster_size->Write();
}
<file_sep>/src/core/geometry/exceptions.h
/**
* @file
* @brief Collection of all geometry exceptions
*
* @copyright MIT License
*/
#ifndef ALLPIX_GEOMETRY_EXCEPTIONS_H
#define ALLPIX_GEOMETRY_EXCEPTIONS_H
#include <string>
#include "core/utils/exceptions.h"
#include "core/utils/type.h"
namespace allpix {
/**
* @ingroup Exceptions
* @brief Indicates an error with finding a detector by name
*/
class InvalidDetectorError : public RuntimeError {
public:
/**
* @brief Constructs an error with a detector that is not found
* @param name Identifier for the detector that is not found
*/
explicit InvalidDetectorError(const std::string& name) {
error_message_ = "Could not find a detector with name '" + name + "'";
}
};
/**
* @ingroup Exceptions
* @brief Indicates an error that the detector model is not found
*/
class InvalidModelError : public RuntimeError {
public:
/**
* @brief Constructs an error with a model that is not found
* @param name Identifier for the model that is not found
*/
explicit InvalidModelError(const std::string& name) {
error_message_ = "Could not find a detector model of type '" + name + "'";
}
};
/**
* @ingroup Exceptions
* @brief Indicates an attempt to add a detector that is already registered before
*/
class DetectorExistsError : public RuntimeError {
public:
/**
* @brief Constructs an error for a non unique detector
* @param name Name of the detector that is added earlier
*/
explicit DetectorExistsError(const std::string& name) {
error_message_ = "Detector with name " + name + " is already registered, detector names should be unique";
}
};
/**
* @ingroup Exceptions
* @brief Indicates an attempt to add a detector model that is already registered before
*/
class DetectorModelExistsError : public RuntimeError {
public:
/**
* @brief Constructs an error for a non unique model
* @param name Name of the model that is added earlier
*/
explicit DetectorModelExistsError(const std::string& name) {
error_message_ = "Model with type " + name + " is already registered, detector names should be unique";
}
};
} // namespace allpix
#endif /* ALLPIX_GEOMETRY_EXCEPTIONS_H */
<file_sep>/src/modules/ROOTObjectReader/README.md
## ROOTObjectReader
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
**Output**: *all objects in input file*
#### Description
Converts all object data stored in the ROOT data file produced by ROOTObjectWriter back into messages (see the description of ROOTObjectWriter for more information about the format). Reads all trees defined in the data file that contain AllPix objects. Creates a message from the objects in the tree for every event (as long as the file contains the same number of events as used in the simulation).
Currently it is not yet possible to exclude objects from being read. In case not all objects should be converted to messages, these objects need to be removed from the file before the simulation is started.
#### Parameters
* `file_name` : Location of the ROOT file containing the trees with the object data
#### Usage
This module should be at the beginning of the main configuration. An example to read the objects from the file *data.root* is:
```ini
[ROOTObjectReader]
file_name = "data.root"
```
<file_sep>/doc/usermanual/chapters/getting_started.tex
\section{Getting Started}
After finishing the installation, the first simulations can be run. This Getting Started guide is written with a default installation in mind, meaning that some parts may not work with a custom installation. When the \textit{allpix} binary is used, this refers to the executable installed in \textit{bin/allpix} in your installation path. Remember that before running any \apsq simulation, ROOT and likely Geant4 should be initialized. Refer to Section \ref{sec:initialize_dependencies} for instructions on how to load those libraries.
\subsection{Configuration Files}
\label{sec:configuration_files}
The framework has to be configured with simple human-readable configuration files. The configuration format is described in detail in Section \ref{sec:config_file_format}. The configuration consists of several section headers within $[$ and $]$ brackets, and a section without header at the start. Every section contains a set of key/value pairs separated by the \texttt{=} character. The \texttt{\#} character is used to indicate comments.
The framework has the following three required layers of configuration files:
\begin{itemize}
\item The \textbf{main} configuration: The most important configuration file and the file that is passed directly to the binary. Contains both the global framework configuration and the list of modules to instantiate together with their configuration. An example can be found in the repository at \textit{etc/example.conf}. More details and a more thorough example are found in Section \ref{sec:main_config}.
\item The \textbf{detector} configuration passed to the framework to determine the geometry. Describes the detector setup, containing the position, orientation and model type of all detectors. An example is available in the repository at \textit{etc/example\_detector.conf}. Introduced in Section \ref{sec:detector_config}.
\item The detector \textbf{models} configuration. Contains the parameters describing a particular type of detector. Several models are already shipped with the framework, and new types of detectors can easily be added. See \textit{models/test.conf} in the repository for an example. Please refer to Section \ref{sec:adding_detector_model} for more details about adding new models.
\end{itemize}
Before going into depth on defining the required configuration files, first more detailed information about the configuration values is provided in the next paragraphs.
\subsubsection{Parsing types and units}
\label{sec:config_values}
The \apsq framework supports the use of a variety of types for all configuration values. The module specifies how the value type should be interpreted. An error will be raised if the key is not specified in the configuration file, if the conversion to the desired type is not possible, or if the given value is outside the domain of possible options. Please refer to the module documentation in Section \ref{sec:modules} for the list of module parameters and their types. Parsing the value roughly follows common sense (more details can be found in Section \ref{sec:accessing_parameters}). A few special rules do apply:
\begin{itemize}
\item If the value is a \textbf{string} it may be enclosed by a single pair of double quotation marks (\texttt{"}), which are stripped before passing the value to the modules. If the string is not enclosed in quotation marks, all whitespace before and after the value is erased. If the value is an array of strings, the value is split at every whitespace or comma (\texttt{,}) that is not enclosed in quotation marks.
\item If the value is a \textbf{boolean}, either numerical (\texttt{0}, \texttt{1}) or textual (\texttt{false}, \texttt{true}) representations are accepted.
\item If the value is a \textbf{relative path} that path will be made absolute by adding the absolute path of the directory that contains the configuration file where the key is defined.
\item If the value is an \textbf{arithmetic} type, it may have a suffix indicating the unit. The list of base units is shown in Table \ref{tab:units}.
\end{itemize}
\begin{table}[h]
\centering
\caption{List of units supported by \apsq}
\label{tab:units}
\begin{tabular}{|l|l|l|}
\hline
\textbf{Quantity} & \textbf{Default unit} & \textbf{Auxiliary units} \\ \hline
\multirow{6}{*}{\textit{Length}} & \multirow{6}{*}{mm (millimeter)} & nm (nanometer) \\ \cline{3-3}
& & um (micrometer) \\ \cline{3-3}
& & cm (centimeter) \\ \cline{3-3}
& & dm (decimeter) \\ \cline{3-3}
& & m (meter) \\ \cline{3-3}
& & km (kilometer) \\ \hline
\multirow{4}{*}{\textit{Time}} & \multirow{4}{*}{ns (nanosecond)} & ps (picosecond) \\ \cline{3-3}
& & us (microsecond) \\ \cline{3-3}
& & ms (millisecond) \\ \cline{3-3}
& & s (second) \\ \hline
\multirow{4}{*}{\textit{Energy}} & \multirow{4}{*}{MeV (megaelectronvolt)} & eV (electronvolt) \\ \cline{3-3}
& & keV (kiloelectronvolt) \\ \cline{3-3}
& & GeV (gigaelectronvolt) \\ \hline
\textit{Temperature} & K (kelvin) & \\ \hline
\textit{Charge} & e (elementary charge) & C (coulomb) \\ \hline
\multirow{2}{*}{\textit{Voltage}} & \multirow{2}{*}{MV (megavolt)} & V (volt) \\ \cline{3-3}
& & kV (kilovolt) \\ \hline
\textit{Angle} & rad (radian) & deg (degree) \\ \hline
\end{tabular}
\end{table}
Combinations of base units can be specified by using the multiplication sign \texttt{*} and the division sign \texttt{/} that are parsed in linear order (thus $\frac{V m}{s^2}$ should be specified as $V*m/s/s$). The framework assumes the default units (as given in Table \ref{tab:units}) if the unit is not explicitly specified. It is recommended to always specify the unit explicitly for all parameters that are not dimensionless as well as for angles.
Examples of specifying key/value pairs of various types are given below:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
# All whitespace at the front and back is removed
first_string = string_without_quotation
# All whitespace within the quotation marks is kept
second_string = " string with quotation marks "
# Array values are split on whitespace and commas
string_array = "first element" "second element","third element"
# Integer and floats can be given in standard formats
int_value = 42
float_value = 123.456e9
# Units can be passed to arithmetic type
energy_value = 1.23MeV
time_value = 42ns
# Units are combined in linear order
acceleration_value = 1.0m/s/s
# Thus the quantity below is the same as 1.0deg*kV*K/m/s
random_quantity = 1.0deg*kV/m/s*K
# Relative paths are expanded to absolute
# Path below will be /home/user/test if the config file is in /home/user
output_path = "test"
# Booleans can be represented in numerical or textual style
my_switch = true
my_other_switch = 0
\end{minted}
\subsubsection{Detector configuration}
\label{sec:detector_config}
The detector configuration consists of a set of section headers describing the detectors in the setup. The section headers describe the names used to identify the detectors. All names should be unique: using the same name multiple times is not possible. Every detector should contain all of the following parameters:
\begin{itemize}
\item A string referring to the \textbf{type} of the detector model. The model should exist in the search path described in Section \ref{sec:detector_models}.
\item The 3D \textbf{position} in the world frame in the order x, y, z. See Section \ref{sec:models_geometry} for details.
\item The \textbf{orientation} specified as Z-X-Z extrinsic Euler angle. This means the detector is rotated first around the world's Z-axis, then around the world's X-axis and then again around the global Z-axis. See Section \ref{sec:models_geometry} for details.
\end{itemize}
Furthermore it is possible to specialize certain parameters of the detector models, which is explained in more detail in Section \ref{sec:detector_models}.
\begin{figure}[t]
\centering
\includegraphics[width=0.6\textwidth]{telescope.png}
\caption{Particle passage through the telescope setup of the detector configuration file}
\label{fig:telescope}
\end{figure}
An example configuration file of one test detector and two Timepix~\cite{timepix} models is:
\inputminted[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}{../../etc/manual_detector.conf}
Figure \ref{fig:telescope} shows the setup described in the file. This configuration is used in the rest of this chapter for explaining concepts.
\subsubsection{Main configuration}
\label{sec:main_config}
The main configuration consists of a set of section headers that specify the modules used. All modules are executed in the \underline{linear} order in which they are defined. A few section names have a special meaning in the main configuration, namely the following:
\begin{itemize}
\item The \textbf{global} (framework) header sections: These are all the zero-length section headers (including the one at the start) and all with the header \texttt{AllPix} (case-sensitive). These are combined and accessed together as the global configuration, which contains all parameters of the framework (see Section \ref{sec:framework_parameters} for details). All key-value pairs defined in this section are also inherited by all individual configurations as long as the key is not defined in the module configuration itself.
\item The \textbf{ignore} header sections: All sections with name \texttt{Ignore} are ignored. Key-value pairs defined in such a section, as well as the section itself, are discarded. These sections are useful for quickly enabling and disabling modules while debugging.
\end{itemize}
All other section headers are used to instantiate the modules. Installed modules are loaded automatically. If problems arise please review the loading rules described in Section \ref{sec:module_instantiation}.
Modules can be specified multiple times in the configuration files, but whether this is allowed depends on their type and configuration. The type of the module determines how the module is instantiated:
\begin{itemize}
\item If the module is \textbf{unique}, it is instantiated only a single time irrespective of the number of detectors. This kind of module should only appear once in the whole configuration file, unless different inputs and outputs are used as explained in Section \ref{sec:redirect_module_input_outputs}.
\item If the module is \textbf{detector}-specific, it is run on every detector it is configured to run on. By default an instantiation is created for all detectors defined in the detector configuration file (see Section \ref{sec:detector_config}), unless one or both of the following parameters are specified:
\begin{itemize}
\item \textbf{name}: An array of names of the detectors the module should run on. Replaces all global and type-specific modules of the same kind.
\item \textbf{type}: An array of types of the detectors the module should run on. Instantiated after considering all detectors specified by the name parameter above. Replaces all global modules of the same kind.
\end{itemize}
\end{itemize}
A valid example configuration using the detector configuration above could be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
# Key is part of the empty section and therefore the global sections
string_value = "example1"
# The location of the detector configuration should be a global parameter
detectors_file = "manual_detector.conf"
# The AllPix section is also considered global and merged with the above
[AllPix]
another_random_string = "example2"
# First run a unique module
[MyUniqueModule]
# This module takes no parameters
# [MyUniqueModule] cannot be instantiated another time
# Then run some detector modules on different detectors
# First run a module on the detector of type Timepix
[MyDetectorModule]
type = "timepix"
int_value = 1
# Replace the module above for `dut` with a specialized version
# this does not inherit any parameters from earlier modules
[MyDetectorModule]
name = "dut"
int_value = 2
# Runs the module on the remaining unspecified detector `telescope1`
[MyDetectorModule]
# int_value is not specified, so it uses the default value
\end{minted}
This configuration cannot however be executed in practice, because MyUniqueModule and MyDetectorModule do not exist. In the next paragraphs a useful configuration file with valid modules is presented. Before turning to the module parameters, the global framework parameters are introduced first.
\subsection{Framework parameters}
\label{sec:framework_parameters}
The framework has a variety of global parameters that allow configuring \apsq for different kinds of simulations:
\begin{itemize}
\item \textbf{detectors\_file}: Location of the file describing the detector configuration (introduced in Section \ref{sec:detector_config}). The only \underline{required} global parameter: the framework will fail if it is not specified.
\item \textbf{number\_of\_events}: Determines the total number of events the framework should simulate. Equivalent to the number of times the modules are run. Defaults to one (simulating a single event).
\item \textbf{log\_level}: Specifies the minimum log level which should be written. Possible values include \texttt{FATAL}, \texttt{STATUS}, \texttt{ERROR}, \texttt{WARNING}, \texttt{INFO} and \texttt{DEBUG}, where all options are case-insensitive. Defaults to the \texttt{INFO} level. More details and information about the log levels and changing it for a particular module can be found in Section \ref{sec:logging_verbosity}. Can be overwritten by the \texttt{-v} parameter on the command line.
\item \textbf{log\_format}: Determines the format to display. Possible options include \texttt{SHORT}, \texttt{DEFAULT} and \texttt{LONG}, where all options are case-insensitive. More information again in Section \ref{sec:logging_verbosity}.
\item \textbf{log\_file}: File where output should be written to besides standard output (usually the terminal). Only writes to standard output if this option is not provided. Another (additional) location to write to can be specified on the command line using the \texttt{-l} parameter.
\item \textbf{output\_directory}: Directory to write all output files into. Extra directories are created for all the module instantiations. This directory also contains the \textbf{root\_file} described below. Defaults to the current working directory with the subdirectory \textit{output/} attached.
\item \textbf{root\_file}: Location relative to the \textbf{output\_directory}, where the ROOT output data of all modules will be written to. Default value is \textit{modules.root}. The directories will be created automatically for all the module instantiations in this ROOT file.
\item \textbf{random\_seed}: Seed to use for the global random seed generator used to initialize the seeds for the module instantiations. A random seed from multiple entropy sources will be generated if the parameter is not specified. Can be used to reproduce an earlier simulation run.
\item \textbf{library\_directories}: Additional directories to search for libraries, before searching the default paths. See Section \ref{sec:module_instantiation} for details.
\item \textbf{model\_path}: Additional files or directories from which detector models should be read besides the standard search locations. Refer to Section \ref{sec:detector_models} for more information.
\end{itemize}
With this information in mind, it is time to set up a real simulation. Module parameters are briefly introduced when they are first used. For more details about these parameters, the module documentation in Section \ref{sec:modules} should be consulted.
\subsection{Setting up the Simulation Chain}
\label{sec:setting_up_simulation_chain}
Below a simple, but complete simulation is described. A typical simulation in \apsq contains at least the following components.
\begin{itemize}
\item The \textbf{geometry builder}, responsible for creating the external Geant4 geometry from the internal geometry. In this document internal geometry refers to the parameters containing the geometry description in \apsq, while external geometry refers to the constructed Geant4 geometry used for deposition (and possibly visualization).
\item The \textbf{deposition} module that simulates the particle beam that deposits charge carriers in the detectors using the provided physics list (containing a description of the simulated interactions) and the geometry created above.
\item A \textbf{propagation} module that propagates the charges through the sensor.
\item A \textbf{transfer} module that transfers the charges from the sensor and assigns them to a pixel.
\item A \textbf{digitizer} module which converts the charges in the pixel to a detector hit, simulating the frontend electronics response.
\item An \textbf{output} module, saving the data of the simulation. At the moment output can be written as ROOT TTree as explained in more detail in Section \ref{sec:storing_output_data}.
\end{itemize}
In the example charges will be deposited in the three sensors from the detector configuration file in Section \ref{sec:detector_config}. Only the charges in the Timepix models are going to be propagated and digitized. The final results of hits in the device under test (dut) will be written to a ROOT histogram. A configuration file that implements this description is as follows:
\inputminted[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}{../../etc/manual.conf}
The configuration above is available as \textit{etc/manual.conf}. The detector configuration file in Section \ref{sec:detector_config} can be found in \textit{etc/manual\_detector.conf}. The total simulation can then be executed by passing the configuration to the allpix binary as follows:
\begin{verbatim}
$ allpix -c etc/manual.conf
\end{verbatim}
The simulation should then start. It should produce output similar to the example found in Appendix \ref{sec:example_output}. The final histogram of the hits will then be available in the ROOT file \textit{output/modules.root} as the local file \textit{DetectorHistogrammer/histogram}.
If problems occur, please make sure you have an up-to-date and properly installed version of \apsq (see the installation instructions in Section \ref{sec:installation}). If modules and models fail to load, more information about loading problems can be found in the detailed framework description in Section \ref{sec:framework}.
\subsection{Adding More Modules}
Before going to more advanced configurations, a few simple modules that a user might want to add are discussed.
\paragraph{Visualization}
Displaying the geometry and the particle tracks helps a lot in both checking and interpreting the results. Visualization is fully supported through Geant4, including all the options provided by Geant4~\cite{geant4vis}. Using the Qt viewer with the OpenGL driver is the recommended option, as long as the installed version of Geant4 supports it.
To add the visualization, the \texttt{VisualizationGeant4} section should be added at the end of the configuration file before running the simulation again. An example configuration with some useful parameters is given below:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
[VisualizationGeant4]
# Setup to use the Qt gui
mode = "gui"
# Use simple visualization for huge speedup
simple_view = 1
# Set transparency percentage
transparency = 0.4
# Set viewing style (alternative is 'wireframe')
view_style = "surface"
# Color trajectories by charge of the particle
trajectories_color_mode = "charge"
trajectories_color_positive = "blue"
trajectories_color_neutral = "green"
trajectories_color_negative = "red"
\end{minted}
If an error is raised about Qt not being available, the VRML viewer can be used as a replacement; it is however recommended to reinstall Geant4 with the Qt viewer included. To use the VRML viewer instead, follow the steps below:
\begin{itemize}
\item The VRML viewer should be installed on your operating system. Good options are for example FreeWRL and OpenVRML.
\item Subsequently two environment variables should be exported to inform Geant4 of the configuration: \texttt{G4VRMLFILE\_VIEWER}, which should point to the location of the viewer, and \texttt{G4VRMLFILE\_MAX\_FILE\_NUM}, which should typically be set to 1 to prevent too many files from being created (an example is given directly after this list).
\item Finally the example section below should be added at the end of the configuration file before running the simulation again:
\end{itemize}
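The exports could look as follows, assuming the FreeWRL viewer is installed and available as \texttt{freewrl} (the binary name depends on the installed viewer):
\begin{verbatim}
$ export G4VRMLFILE_VIEWER=freewrl
$ export G4VRMLFILE_MAX_FILE_NUM=1
\end{verbatim}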
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
[VisualizationGeant4]
# Do not start the Qt gui
mode = "none"
# Use the VRML driver
driver = "VRML2FILE"
# Use simple visualization for huge speedup
simple_view = 1
# See more parameters above
\end{minted}
More information about all the possible configuration parameters can be found in the module documentation in Section \ref{sec:modules}.
\paragraph{Electric Fields}
\label{sec:module_electric_field}
The example configuration above already contained a module for adding a linear electric field to the sensitive detector. By default, detectors do not have any electric field. This makes the \texttt{GenericPropagation} module slow, because it will wait for the propagated charges to reach the end of the sensor, which can take a long time by diffusion alone. Therefore a simple linear electric field has been added to the sensors. The section below sets the electric field at every point in the pixel grid to the voltage divided by the thickness of the sensor.
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
# Add a electric field
[ElectricFieldReader]
# Set it to be linear
model = "linear"
# Bias voltage used to create the linear electric field
voltage = 50V
\end{minted}
\todo{The issue with slow propagation should be fixed}
A fully specified electric field in the detectors can also be provided using the .init format. The init format is the format used by the PixelAV software~\cite{swartz2002detailed,swartz2003cms} after conversion from internal TCAD formats. These fields can be attached to specific detectors using the standard syntax for detector binding. A possible configuration would be:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
[ElectricFieldReader]
# Bind the electric field to the timepix sensor
name = "tpx"
# Specify the model is in the init format
model = "init"
# Name of the file containing the electric field
file_name = "example_electric_field.init"
\end{minted}
An example electric field (with the name used above) can be found in the \textit{etc} directory of the \apsq repository.
To import electric fields from TCAD into \apsq, a converter tool is included in the framework. A detailed description of how this tool should be used and how exactly it works can be found in Section \ref{sec:tcad_electric_field_converter}.
\subsection{Redirect Module Inputs and Outputs}
\label{sec:redirect_module_input_outputs}
By default it is not allowed to have two instances of the same type of module (linked to the same detector), but in several cases it may be useful to run the same module with different settings. The \apsq framework supports this by allowing the input and output data of every module to be redirected. A module sends its output by default to all modules listening to the type of object it dispatches. It is however possible to specify a certain name in addition to the type of the data.
The output name of a module can be changed by setting the \textbf{output} parameter of the module to a unique value. The output of this module is then no longer sent to modules without a configured input, because the default input listens only to data without a name. The \textbf{input} parameter of a particular receiving module should therefore be set to match the value of the \textbf{output} parameter. In addition it is allowed to set the \textbf{input} parameter to the special value \texttt{*} to indicate that it should listen to all messages irrespective of their name.
An example of a configuration with two settings for digitization is shown below:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
# Digitize the propagated charges with low noise levels
[DefaultDigitizer]
# Specify an output identifier
output = "low_noise"
# Low amount of noise added by the electronics
electronics_noise = 100e
# Default values are used for the other parameters
# Digitize the propagated charges with high noise levels
[DefaultDigitizer]
# Specify an output identifier
output = "high_noise"
# High amount of noise added by the electronics
electronics_noise = 500e
# Default values are used for the other parameters
# Save histogram for 'low_noise' digitized charges
[DetectorHistogrammer]
# Specify input identifier
input = "low_noise"
# Save histogram for 'high_noise' digitized charges
[DetectorHistogrammer]
# Specify input identifier
input = "high_noise"
\end{minted}
\todo{Maybe we need an option to split the modules}
\subsection{Logging and Verbosity Levels}
\label{sec:logging_verbosity}
\apsq is designed to identify mistakes and implementation errors as early as possible and tries to give the user a clear indication about the problem. The amount of feedback can be controlled using different log levels. The global log level can be set using the global parameter \textbf{log\_level}. The log level can be overridden for a specific module by adding the \textbf{log\_level} parameter to that module. The following log levels are currently supported:
\begin{itemize}
\item \textbf{FATAL}: Indicates a fatal error that should and will lead to direct termination of the application. Typically only emitted in the main executable after catching exceptions, because exceptions are the preferred way of fatal error handling as discussed in Section \ref{sec:error_reporting_exceptions}. An example of a fatal error is an invalid configuration parameter.
\item \textbf{STATUS}: Important informational messages about the status of the simulation. Should only be used for informational messages that have to be logged in every run (unless the user wants to only fetch fatal errors)
\item \textbf{ERROR}: Severe error that should never happen during a normal well-configured simulation run. Frequently leads to a fatal error and can be used to provide extra information that may help in finding the reason of the problem. For example used to indicate the reason a dynamic library cannot be loaded.
\item \textbf{WARNING}: Indicates conditions that should not happen normally and that possibly lead to unexpected results. The framework can however typically continue without problems after a warning. Can for example indicate that an output message is not used and that a module may therefore perform unnecessary work.
\item \textbf{INFO}: Informational messages about the physics process of the simulation. Contains summaries of the simulation details for every event and for the overall simulation. Should typically produce at most one line of output per event.
\item \textbf{DEBUG}: In-depth details about the progress of the framework and all the physical details of the simulation. Usually produces large volumes of output per event; this level is therefore normally only used for debugging the physics simulation of the modules.
\item \textbf{TRACE}: Messages to trace what the framework or a module is currently doing. Does not contain any direct information unlike the \textbf{DEBUG} level above, but only indicates which part of the module or framework is currently running. Mostly used for software debugging or determining the speed bottleneck in simulations.
\end{itemize}
It is not recommended to set the \textbf{log\_level} higher than \textbf{WARNING} in a typical simulation as important messages could be missed.
The logging system also supports a few different formats to display the log messages. The following formats are supported for the global parameter \textbf{log\_format} and for the module parameter with the same name that overwrites it:
\begin{itemize}
\item \textbf{SHORT}: Displays the data in a short form. Includes only the first character of the log level followed by the section header and the message.
\item \textbf{DEFAULT}: The default format. Displays the date, log level, section header and the message itself.
\item \textbf{LONG}: Detailed logging format. Besides the information above, it also shows the file and line where the log message was produced. This can help in debugging modules.
\end{itemize}
More details about the logging system and the procedure for reporting errors in the code can be found in Sections \ref{sec:logger} and \ref{sec:error_reporting_exceptions}.
\subsection{Storing Output Data}
\label{sec:storing_output_data}
Saving the output to persistent storage is of primary importance for later review and analysis. \apsq primarily uses ROOT for storing output data, because it supports writing arbitrary objects and is a standard tool in High-Energy Physics. The \texttt{ROOTObjectWriter} automatically saves all the data objects written by the modules to a TTree~\cite{roottree} (see the ROOT documentation for more information about TTrees). The module stores each object type in a separate tree, creating a branch for every combination of detector and the name given to the output as explained in Section \ref{sec:redirect_module_input_outputs}. For each event, values are added to the leaves of the branches containing the data of the objects. This allows for easy histogramming of the acquired data over the total run using the ROOT utilities. Relations between objects within a single event are internally stored as TRef, allowing related objects to be fetched as long as they are loaded in memory. An exception is thrown when trying to fetch an object that is not loaded.
To save the output of all objects, a \texttt{ROOTObjectWriter} has to be added with a \texttt{file\_name} parameter (without the root suffix) to specify the file location of the created ROOT file in the global output directory. The default file name is \texttt{data}, which means that \textbf{data.root} is created in the output directory (next to the ROOT file which contains the output of the modules, named \textit{modules.root} by default). To replicate the default behaviour the following configuration can be used:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
# The object writer listens to all output data
[ROOTObjectWriter]
# specify the output path (can be omitted as it is the default)
file_name = "data.root"
\end{minted}
The generated output file can be further analyzed with ROOT macros. A simple macro for converting the results to a tree with standard branches for comparisons can be found in Section \ref{sec:root_analysis_macros}.
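As a minimal illustration of inspecting such a file interactively, the following ROOT macro snippet could be used (the tree name \texttt{PixelHit} is an assumption following the one-tree-per-object-type layout described above):
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{c++}
// Open the data file and fetch the (assumed) tree holding the pixel hits
TFile file("output/data.root");
auto* tree = static_cast<TTree*>(file.Get("PixelHit"));
if(tree != nullptr) {
    // List the branches, one per detector / output name combination
    tree->Print();
}
\end{minted}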
Besides using the generated tree for analysis, it is also possible to read all the object data back in and propagate it to further modules. This can be used to split the execution of several parts of the simulation into multiple independent steps, which can be executed after each other. The tree data can be read using a \texttt{ROOTObjectReader} module, which automatically dispatches all objects to the right detector with the correct name in the same event, using the internal name of the stored data. An example of using this module is the following:
\begin{minted}[frame=single,framesep=3pt,breaklines=true,tabsize=2,linenos]{ini}
# The object reader dispatches all objects in the tree
[ROOTObjectReader]
# path to the output data relative to the configuration file
file_name = "../output/data.root"
\end{minted}
\todo{more output formats have to be added probably, or do we require the user to do this through ROOT}
<file_sep>/src/objects/Object.cpp
/**
* @file
* @brief Implementation of Object base class
* @copyright MIT License
*/
#include "Object.hpp"
using namespace allpix;
Object::Object(const Object&) = default;
Object& Object::operator=(const Object&) = default;
Object::Object(Object&&) = default;
Object& Object::operator=(Object&&) = default;
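// ROOT macro generating the parts of the dictionary needed to stream this class to and from file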
ClassImp(Object)
<file_sep>/tools/tcad_dfise_converter/dfise_converter.h
#include <utility>
#include <vector>
#include "read_dfise.h"
// Interrupt handler
void interrupt_handler(int);
// Main function to execute converter
int main(int argc, char** argv);
// Interpolation of a single vertex
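// Computes the field at a query point inside a tetrahedron as the volume-weighted
// (barycentric) combination of the field values at the four vertices; the returned
// flag indicates whether the interpolation is valid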
std::pair<Point, bool> barycentric_interpolation(Point query_point,
std::vector<Point> tetra_vertices,
std::vector<Point> tetra_vertices_field,
double tetra_volume);
<file_sep>/doc/usermanual/appendices/example_output.tex
\section{Output of Example Simulation}
\label{sec:example_output}
Possible output for the example simulation in Section \ref{sec:setting_up_simulation_chain} is given below:
\begin{lstlisting}[breaklines]
(S) Welcome to AllPix v0.3alpha3+46^g5ec02fc
(S) Initialized PRNG with system entropy seed 18103434008225088751
(S) Loading module GeometryBuilderGeant4
(S) Loading module DepositionGeant4
(S) Loading module ElectricFieldReader
(S) Loading module GenericPropagation
(S) Loading module SimpleTransfer
(S) Loading module DefaultDigitizer
(S) Loading module DetectorHistogrammer
(S) Loading module VisualizationGeant4
(S) Loaded 8 modules
(S) Initializing 15 module instantiations
(I) [I:DepositionGeant4] Not depositing charges in telescope1 because there is no listener for its output
(I) [I:ElectricFieldReader:telescope1] Set linear electric field with magnitude 125V/mm
(I) [I:ElectricFieldReader:dut] Set linear electric field with magnitude 166.667V/mm
(I) [I:ElectricFieldReader:telescope2] Set linear electric field with magnitude 166.667V/mm
(S) Initialized 15 module instantiations
(S) Running event 1 of 5
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 64446 charges in sensor of detector dut
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 29457 charges in sensor of detector telescope2
(I) [R:GenericPropagation:dut] Propagated 64446 charges in 1291 steps in average time of 8.9313ns
(I) [R:GenericPropagation:telescope2] Propagated 29457 charges in 590 steps in average time of 6.70671ns
(I) [R:SimpleTransfer:dut] Transferred 64446 charges to 5 pixels
(I) [R:SimpleTransfer:telescope2] Transferred 29457 charges to 4 pixels
(I) [R:DefaultDigitizer:dut] Digitized 3 pixel hits
(I) [R:DefaultDigitizer:telescope2] Digitized 4 pixel hits
(W) [R:DefaultDigitizer:telescope2] Dispatched message Message<allpix::PixelHit> from DefaultDigitizer:telescope2 has no receivers!
(S) Running event 2 of 5
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 41587 charges in sensor of detector dut
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 24972 charges in sensor of detector telescope2
(I) [R:GenericPropagation:dut] Propagated 41587 charges in 834 steps in average time of 4.85424ns
(I) [R:GenericPropagation:telescope2] Propagated 24972 charges in 500 steps in average time of 6.70781ns
(I) [R:SimpleTransfer:dut] Transferred 41587 charges to 5 pixels
(I) [R:SimpleTransfer:telescope2] Transferred 24972 charges to 4 pixels
(I) [R:DefaultDigitizer:dut] Digitized 5 pixel hits
(I) [R:DefaultDigitizer:telescope2] Digitized 4 pixel hits
(W) [R:DefaultDigitizer:telescope2] Dispatched message Message<allpix::PixelHit> from DefaultDigitizer:telescope2 has no receivers!
(S) Running event 3 of 5
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 23107 charges in sensor of detector dut
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 48352 charges in sensor of detector telescope2
(I) [R:GenericPropagation:dut] Propagated 23107 charges in 464 steps in average time of 6.55353ns
(I) [R:GenericPropagation:telescope2] Propagated 48352 charges in 968 steps in average time of 6.7221ns
(I) [R:SimpleTransfer:dut] Transferred 23107 charges to 5 pixels
(I) [R:SimpleTransfer:telescope2] Transferred 48352 charges to 4 pixels
(I) [R:DefaultDigitizer:dut] Digitized 4 pixel hits
(I) [R:DefaultDigitizer:telescope2] Digitized 4 pixel hits
(W) [R:DefaultDigitizer:telescope2] Dispatched message Message<allpix::PixelHit> from DefaultDigitizer:telescope2 has no receivers!
(S) Running event 4 of 5
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 96395 charges in sensor of detector dut
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 32394 charges in sensor of detector telescope2
(I) [R:GenericPropagation:dut] Propagated 96395 charges in 1933 steps in average time of 7.78803ns
(I) [R:GenericPropagation:telescope2] Propagated 32394 charges in 648 steps in average time of 6.71757ns
(I) [R:SimpleTransfer:dut] Transferred 96395 charges to 9 pixels
(I) [R:SimpleTransfer:telescope2] Transferred 32394 charges to 4 pixels
(I) [R:DefaultDigitizer:dut] Digitized 7 pixel hits
(I) [R:DefaultDigitizer:telescope2] Digitized 4 pixel hits
(W) [R:DefaultDigitizer:telescope2] Dispatched message Message<allpix::PixelHit> from DefaultDigitizer:telescope2 has no receivers!
(S) Running event 5 of 5
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 24758 charges in sensor of detector dut
(W) [R:DepositionGeant4] Dispatched message Message<allpix::MCParticle> from DepositionGeant4 has no receivers!
(I) [R:DepositionGeant4] Deposited 26760 charges in sensor of detector telescope2
(I) [R:GenericPropagation:dut] Propagated 24758 charges in 496 steps in average time of 7.10464ns
(I) [R:GenericPropagation:telescope2] Propagated 26760 charges in 536 steps in average time of 6.68743ns
(I) [R:SimpleTransfer:dut] Transferred 24758 charges to 5 pixels
(I) [R:SimpleTransfer:telescope2] Transferred 26760 charges to 4 pixels
(I) [R:DefaultDigitizer:dut] Digitized 3 pixel hits
(I) [R:DefaultDigitizer:telescope2] Digitized 4 pixel hits
(W) [R:DefaultDigitizer:telescope2] Dispatched message Message<allpix::PixelHit> from DefaultDigitizer:telescope2 has no receivers!
(S) Finished run of 5 events
(I) [F:DepositionGeant4] Deposited total of 824456 charges in 4 sensor(s) (average of 41222 per sensor for every event)
(I) [F:GenericPropagation:dut] Propagated total of 250293 charges in 5018 steps in average time of 7.41286ns
(I) [F:GenericPropagation:telescope2] Propagated total of 161935 charges in 3242 steps in average time of 6.71046ns
(I) [F:SimpleTransfer:telescope1] Transferred total of 0 charges to 0 different pixels
(I) [F:SimpleTransfer:dut] Transferred total of 250293 charges to 9 different pixels
(I) [F:SimpleTransfer:telescope2] Transferred total of 161935 charges to 4 different pixels
(I) [F:DefaultDigitizer:telescope1] Digitized 0 pixel hits in total
(I) [F:DefaultDigitizer:dut] Digitized 22 pixel hits in total
(I) [F:DefaultDigitizer:telescope2] Digitized 20 pixel hits in total
(I) [F:DetectorHistogrammer:dut] Plotted 22 hits in total, mean position is (125.773,125.318)
(I) [F:VisualizationGeant4] Starting visualization session
(S) Finalization completed
(S) Executed 15 instantiations in 5 seconds, spending 52% of time in slowest instantiation VisualizationGeant4
(S) Average processing time is 1060 ms/event, event generation at 1 Hz
\end{lstlisting}
<file_sep>/src/modules/ROOTObjectReader/ROOTObjectReaderModule.cpp
/**
* @file
* @brief Implementation of ROOT data file reader module
* @copyright MIT License
*/
#include "ROOTObjectReaderModule.hpp"
#include <climits>
#include <string>
#include <utility>
#include <TBranch.h>
#include <TKey.h>
#include <TObjArray.h>
#include <TTree.h>
#include "core/messenger/Messenger.hpp"
#include "core/utils/log.h"
#include "core/utils/type.h"
#include "objects/Object.hpp"
#include "objects/objects.h"
#include "core/utils/type.h"
using namespace allpix;
ROOTObjectReaderModule::ROOTObjectReaderModule(Configuration config, Messenger* messenger, GeometryManager* geo_mgr)
: Module(config), config_(std::move(config)), messenger_(messenger), geo_mgr_(geo_mgr) {}
/**
* @note Objects cannot be stored in smart pointers due to internal ROOT logic
*/
ROOTObjectReaderModule::~ROOTObjectReaderModule() {
for(auto message_inf : message_info_array_) {
delete message_inf.objects;
}
}
/**
* Adds lambda function map to convert a vector of generic objects to a templated message containing this particular type of
* object from its typeid.
*/
template <typename T> static void add_creator(ROOTObjectReaderModule::MessageCreatorMap& map) {
map[typeid(T)] = [&](std::vector<Object*> objects, std::shared_ptr<Detector> detector = nullptr) {
std::vector<T> data;
for(auto& object : objects) {
data.emplace_back(std::move(*static_cast<T*>(object)));
}
if(detector == nullptr) {
return std::make_shared<Message<T>>(data);
}
return std::make_shared<Message<T>>(data, detector);
};
}
/**
* Uses the SFINAE trick to call the add_creator function for all template arguments of a container class. Used to add a creator
* for every object in the tuple of objects.
*/
template <template <typename...> class T, typename... Args>
static void gen_creator_map_from_tag(ROOTObjectReaderModule::MessageCreatorMap& map, type_tag<T<Args...>>) {
std::initializer_list<int> value{(add_creator<Args>(map), 0)...};
(void)value;
}
/**
* Wrapper function to make the SFINAE trick in \ref gen_creator_map_from_tag work.
*/
template <typename T> static ROOTObjectReaderModule::MessageCreatorMap gen_creator_map() {
ROOTObjectReaderModule::MessageCreatorMap ret_map;
gen_creator_map_from_tag(ret_map, type_tag<T>());
return ret_map;
}
void ROOTObjectReaderModule::init() {
// Initialize the call map from the tuple of available objects
message_creator_map_ = gen_creator_map<allpix::OBJECTS>();
// Open the file with the objects
input_file_ = std::make_unique<TFile>(config_.getPath("file_name", true).c_str());
// Read all the trees in the file
TList* keys = input_file_->GetListOfKeys();
for(auto&& object : *keys) {
auto& key = dynamic_cast<TKey&>(*object);
if(std::string(key.GetClassName()) == "TTree") {
trees_.push_back(static_cast<TTree*>(key.ReadObjectAny(nullptr)));
}
}
if(trees_.empty()) {
LOG(ERROR) << "Provided ROOT file does not contain any trees, module is useless!";
}
// Loop over all found trees
for(auto& tree : trees_) {
// Loop over the list of branches and create the set of receiver objects
TObjArray* branches = tree->GetListOfBranches();
for(int i = 0; i < branches->GetEntries(); i++) {
auto* branch = static_cast<TBranch*>(branches->At(i));
// Add a new vector of objects and bind it to the branch
message_info message_inf;
message_inf.objects = new std::vector<Object*>;
message_info_array_.emplace_back(message_inf);
branch->SetAddress(&(message_info_array_.back().objects));
// Fill the rest of the message information
// FIXME: we want to index this in a different way
std::string branch_name = branch->GetName();
auto split = allpix::split<std::string>(branch_name, "_");
// Fetch information from the tree name
size_t expected_size = 2;
size_t det_idx = 0;
size_t name_idx = 1;
if(branch_name.empty() || branch_name.front() == '_') {
--expected_size;
det_idx = INT_MAX;
--name_idx;
}
if(branch_name.find('_') == std::string::npos) {
--expected_size;
name_idx = INT_MAX;
}
auto split_type = allpix::split<std::string>(branch->GetClassName(), "<>");
if(expected_size != split.size() || split_type.size() != 2) {
throw ModuleError("Tree is malformed and cannot be used for creating messages");
}
if(name_idx != INT_MAX) {
message_info_array_.back().name = split[name_idx];
}
if(det_idx != INT_MAX) {
message_info_array_.back().detector = geo_mgr_->getDetector(split[det_idx]);
}
}
}
}
void ROOTObjectReaderModule::run(unsigned int event_num) {
--event_num;
for(auto& tree : trees_) {
if(event_num >= tree->GetEntries()) {
LOG(WARNING) << "Skipping run because tree does not contain data for event " << event_num;
return;
}
tree->GetEntry(event_num);
}
LOG(TRACE) << "Building messages from stored objects";
// Loop through all branches
for(auto& message_inf : message_info_array_) {
auto objects = message_inf.objects;
// Skip empty objects in current event
if(objects->empty()) {
continue;
}
// Check if a pointer to a dispatcher method exist
auto first_object = (*objects)[0];
auto iter = message_creator_map_.find(typeid(*first_object));
if(iter == message_creator_map_.end()) {
LOG(INFO) << "Cannot dispatch message with object " << allpix::demangle(typeid(*first_object).name())
<< " because it not registered for messaging";
continue;
}
// Update statistics
read_cnt_ += objects->size();
// Create a message
std::shared_ptr<BaseMessage> message = iter->second(*objects, message_inf.detector);
// Dispatch the message
messenger_->dispatchMessage(this, message, message_inf.name);
}
}
void ROOTObjectReaderModule::finalize() {
int branch_count = 0;
for(auto& tree : trees_) {
branch_count += tree->GetListOfBranches()->GetEntries();
}
// Print statistics
LOG(INFO) << "Read " << read_cnt_ << " objects from " << branch_count << " branches";
// Close the file
input_file_->Close();
}
<file_sep>/doc/usermanual/config.tex
% Add command to display allpix squared
\newcommand{\apsq}{\texorpdfstring{\ensuremath{\mathrm{Allpix}^2}}{Allpix\textasciicircum 2}\xspace}
% Temporary TODO commands
% \newcommand{\comment}[1]{#1} % DRAFT
\newcommand{\comment}[1]{} % FINAL
\newcommand{\needcite}{\comment{[CITE?] }}
\newcommand{\needref}{\comment{[REF?] }}
\newcommand{\todo}[1]{\comment{[TODO: #1] }}
\newcommand{\wip}{\textit{This section is not written yet.}}
% Paragraph with new line
\newcommand{\nlparagraph}[1]{\paragraph{#1}\mbox{}\\}
% Define ini format used in the converted Markdown files
\lstdefinelanguage{Ini}
{
basicstyle=\ttfamily\small,
columns=fullflexible,
morecomment=[s][\color{blue}\bfseries]{[}{]},
morecomment=[l]{\#},
morecomment=[l]{;},
commentstyle=\color{gray}\ttfamily,
alsoletter={=},
morekeywords={=},
otherkeywords={},
keywordstyle={\color{green}\bfseries}
}
% Command to add all modules
\newcommand{\includemodulesmd}{\def\temp{@ALLPIX_MODULE_FILES@}\ifx\temp\empty
\textit{Module documentation not added because Markdown to LaTex conversion was not possible}
\else
\foreach \n in @ALLPIX_MODULE_FILES@ {\input{\n}}
\fi}
% Command to add a single converted markdown file
\newcommand{\inputmd}[1]{\input{md/#1}}
% Set bibliography
\addbibresource{usermanual/references.bib}
% Set allpix version
\newcommand{\version}{\lstinline|@ALLPIX_VERSION@|}
\newcommand{\project}{@CMAKE_PROJECT_NAME@}
% Use new lines in FAQ
% FIXME this creates an issue when running htlatex for HTML output:
% ! Argument of \enit@postlabel@i has an extra }.
%\setlist[description]{style=nextline}
<file_sep>/doc/usermanual/chapters/development.tex
\section{Module \& Detector Development}
\subsection{Implementing a New Module}
\label{sec:building_new_module}
Before creating a module, it is essential to read through the framework module manager documentation in Section \ref{sec:module_manager}, the information about the directory structure in Section \ref{sec:module_files} and the details of the module structure in Section \ref{sec:module_structure}. Thereafter, the steps below should provide enough details for starting with a new module \texttt{ModuleName} (replacing \texttt{ModuleName} everywhere with the actual name of the new module):
\begin{enumerate}
\item Run the module initialization script at \textit{etc/scripts/make\_module.sh} in the repository. The script will ask for the name of the module and its type (unique or detector-specific). It creates the directory with a minimal example to get started, together with a setup of the documentation in \textit{README.md}.
\item Before continuing to implement the module it is recommended to check and update the introductory documentation in \textit{README.md}. Also the Doxygen documentation in \textit{\texttt{ModuleName}.hpp} can be extended to give a basic description of the module.
\item Now the constructor, and possibly the \texttt{init}, \texttt{run} and/or \texttt{finalize} methods, can be written, depending on what the new module needs (a minimal sketch is shown after this list).
\end{enumerate}
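The overall shape of such a module is illustrated by the following condensed sketch, modelled on the modules shipped with the framework. The class name, the bound message type and the use of \texttt{bindSingle} are placeholder examples rather than requirements; the skeleton generated by the initialization script should be taken as the actual starting point:
\begin{lstlisting}[language=C++]
// Condensed sketch of a detector-specific module (names are placeholders)
class ModuleNameModule : public Module {
public:
    ModuleNameModule(Configuration config, Messenger* messenger, std::shared_ptr<Detector> detector)
        : Module(config, std::move(detector)), config_(std::move(config)), messenger_(messenger) {
        // Request the input message required by this module
        messenger_->bindSingle(this, &ModuleNameModule::message_, MsgFlags::REQUIRED);
    }
    void init() override {
        // Read configuration parameters, create histograms, etc.
    }
    void run(unsigned int event_num) override {
        // Process the bound message and dispatch the produced objects
    }
    void finalize() override {
        // Summarize statistics and write output
    }

private:
    Configuration config_;
    Messenger* messenger_;
    std::shared_ptr<PixelChargeMessage> message_;
};
\end{lstlisting}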
After this, it is up to the developer to implement all the required functionality in the module. Keep in mind, however, that at some point it may be beneficial to split up modules to support the modular design of \apsq. Various sources of information which may be useful during the development of the module include:
\begin{itemize}
\item The framework documentation in Section \ref{sec:framework} for an introduction to the different parts of the framework.
\item The module documentation in Section \ref{sec:modules} for a description of functionality other modules already provide and to look for similar modules which can help during development.
\item The Doxygen (core) reference documentation included in the framework \todo{available at location X}.
\item The latest version of the source code of all the modules (and the core itself). Freely available to copy and modify under the MIT license at \url{https://gitlab.cern.ch/simonspa/allpix-squared/tree/master}.
\end{itemize}
Any module that may be useful for other people can be contributed back to the main repository. It is very much encouraged to send a merge-request at \url{https://gitlab.cern.ch/simonspa/allpix-squared/merge_requests}.
\subsection{Adding a New Detector Model}
\label{sec:adding_detector_model}
Custom detector models can be easily added to the framework. Before writing a new model, the relevant information is found in Section \ref{sec:config_file_format} describing the file format, Section \ref{sec:config_values} for information about the units used in \apsq, and the full Section \ref{sec:models_geometry} describing the geometry and detector models. In particular, Section \ref{sec:detector_models} explains all the parameters of the detector model. The default models shipped in \textit{models} could serve as examples. To create a new model, follow the steps below:
\begin{enumerate}
\item Create a new file with the internal name of the model followed by the \textit{.conf} suffix (for example \texttt{your\_model.conf}).
\item Add a configuration parameter \texttt{type} with the type of the model, at the moment either 'monolithic' or 'hybrid' for respectively monolithic sensors or hybrid models with bump bonds.
\item Add all the required parameters and possibly other optional parameters explained in Section \ref{sec:detector_models} (a minimal example file is shown after this list).
\item Include the detector model in the search path of the framework by adding the \texttt{model\_path} parameter to the general setting of the main configuration (see Section \ref{sec:framework_parameters}), pointing either directly to the detector model file or to the directory containing it (note that files in this path overwrite models with the same name in the default model folder).
\end{enumerate}
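To give an impression of the format, a minimal model file could look like the following sketch (the parameter values are arbitrary example values, not a recommendation):
\begin{lstlisting}[language=Ini]
# your_model.conf -- hypothetical example values
type = "hybrid"
number_of_pixels = 256 256
pixel_size = 55um 55um
sensor_thickness = 300um
\end{lstlisting}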
Models can be contributed to the repository to make them available to other users of the framework. To add the detector model to the framework the configuration file should be moved to the \textit{models} folder of the repository. Then the file should be added to the installation target in the \textit{CMakeLists.txt} file in the \textit{models} directory. Afterwards a merge-request can be created at \url{https://gitlab.cern.ch/simonspa/allpix-squared/merge_requests}.
<file_sep>/tools/root_analysis_macros/README.md
## ROOT Analysis Macros
Collection of macros to analyze the data generated by the framework. Currently it contains a single macro to convert the TTree of objects to a tree containing typical standard data users are interested in. This is useful for simple comparisons with other frameworks.
#### Comparison tree
Reads all the required trees from the given file and binds their contents to the objects defined in the framework. Then it creates an output tree and binds every branch to a simple arithmetic type. It continues to loop over all the events in the tree, converting the stored data of the various trees to the output tree. The final output tree contains branches for the cluster sizes, aspect ratios, accumulated charge per event, the track position from the Monte-Carlo truth and the reconstructed track using a very simple direct center-of-gravity calculation from the charges, without any corrections.
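The direct center of gravity mentioned above corresponds to a plain charge-weighted mean of the pixel indices. A minimal sketch of such a calculation (variable names are illustrative, not the macro's actual code) is:
```cpp
// Charge-weighted center of gravity of a cluster (illustrative sketch)
double sum_x = 0, sum_y = 0, total_charge = 0;
for(auto& pixel_charge : pixel_charges) {
    auto index = pixel_charge.getPixel().getIndex();
    auto charge = static_cast<double>(pixel_charge.getCharge());
    sum_x += index.x() * charge;
    sum_y += index.y() * charge;
    total_charge += charge;
}
double cog_x = sum_x / total_charge; // reconstructed column position
double cog_y = sum_y / total_charge; // reconstructed row position
```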
<file_sep>/doc/usermanual/chapters/objects.tex
\section{Objects}
\label{sec:objects}
\apsq ships a set of objects that should be used to transfer data between modules. These objects can be sent with the messaging system explained in Section \ref{sec:objects_messages}. A typedef is added to every object to provide an alternative name for the message directly linking to the carried object.
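For example, for the \texttt{PixelHit} object described below, this corresponds to a typedef of the form:
\begin{lstlisting}[language=C++]
// Alternative name for a message carrying PixelHit objects
using PixelHitMessage = Message<PixelHit>;
\end{lstlisting}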
The list of currently supported objects is the following:
\nlparagraph{MCParticle}
The Monte-Carlo truth information about the particle passage through the sensor. Both the entry and the exit point are stored in the object to approximate the track. The exact handling of non-linear tracks, due to for example in-sensor nuclear interactions, is up to the module. The MCParticle also stores an identifier of the particle type. The naming scheme is again up to the module, but it is recommended to use PDG codes~\cite{pdg}.
\nlparagraph{DepositedCharge}
Set of charges that are deposited by an ionizing particle crossing the active material of the sensor. The object stores the \underline{local} position in the sensor together with the total number of deposited charges in elementary charge units. Also the time (in \textit{ns}, the internal framework unit) of the deposition after the start of the event is stored.
\nlparagraph{PropagatedCharge}
Set of charges that are propagated through the silicon sensor due to drift and/or diffusion processes. The object should store the final \underline{local} position of the propagation. This is either on the pixel implant, if the set of charges is ready to be collected, or at any other position in the sensor if the set of charges got stuck or was lost in another process. Timing information about the total time to arrive at the final location, from the start of the event, can also be stored.
\nlparagraph{PixelCharge}
Set of charges that are collected at a single pixel. The pixel indices are stored in both the $x$ and $y$ direction, counting from zero at the first pixel. Only the total number of charges at a pixel is currently stored; the timing information of the individual charges can be retrieved from the related PropagatedCharge objects.
\nlparagraph{PixelHit}
Digitized hit of a pixel. The object allows storing both the time and a signal value. The time can be stored in an arbitrary unit used to timestamp the hits. The signal can hold different kinds of information depending on the type of digitizer used. Examples of the signal information are the 'true' charge, the number of ADC counts or the ToT (time-over-threshold).
<file_sep>/src/core/config/ConfigReader.cpp
/**
* @file
* @brief Implementation of config reader
* @copyright MIT License
*/
#include "ConfigReader.hpp"
#include <cstdlib>
#include <fstream>
#include <string>
#include <vector>
#include "core/utils/file.h"
#include "core/utils/log.h"
#include "exceptions.h"
using namespace allpix;
ConfigReader::ConfigReader() = default;
ConfigReader::ConfigReader(std::istream& stream, std::string file_name) : ConfigReader() {
add(stream, std::move(file_name));
}
ConfigReader::ConfigReader(const ConfigReader& other) : conf_array_(other.conf_array_) {
copy_init_map();
}
ConfigReader& ConfigReader::operator=(const ConfigReader& other) {
conf_array_ = other.conf_array_;
copy_init_map();
return *this;
}
void ConfigReader::copy_init_map() {
conf_map_.clear();
for(auto iter = conf_array_.begin(); iter != conf_array_.end(); ++iter) {
conf_map_[iter->getName()].push_back(iter);
}
}
/**
* @throws ConfigParseError If an error occurred during the parsing of the stream
*
* The configuration is immediately parsed and all of its configurations are available after the function returns.
*/
void ConfigReader::add(std::istream& stream, std::string file_name) {
LOG(TRACE) << "Parsing configuration file " << file_name;
// Convert file name to absolute path (if given)
if(!file_name.empty()) {
file_name = allpix::get_absolute_path(file_name);
}
// Build first empty configuration
std::string section_name;
Configuration conf(section_name, file_name);
int line_num = 0;
while(true) {
// Process config line by line
std::string line;
if(stream.eof()) {
break;
}
std::getline(stream, line);
++line_num;
// Find equal sign
size_t equals_pos = line.find('=');
if(equals_pos == std::string::npos) {
line = allpix::trim(line);
// Ignore empty lines or comments
if(line == "" || line[0] == '#') {
continue;
}
// Parse new section
if(line[0] == '[' && line[line.length() - 1] == ']') {
// Ignore empty sections if they contain no configurations
if(!conf.getName().empty() || conf.countSettings() > 0) {
// Add previous section
conf_array_.push_back(conf);
conf_map_[section_name].push_back(--conf_array_.end());
}
// Begin new section
section_name = std::string(line, 1, line.length() - 2);
conf = Configuration(section_name, file_name);
} else {
// FIXME: should be a bit more helpful...
throw ConfigParseError(file_name, line_num);
}
} else {
std::string key = trim(std::string(line, 0, equals_pos));
std::string value = trim(std::string(line, equals_pos + 1));
char ins = 0;
for(size_t i = 0; i < value.size(); ++i) {
if(value[i] == '\'' || value[i] == '\"') {
if(ins == 0) {
ins = value[i];
} else if(ins == value[i]) {
ins = 0;
}
}
if(ins == 0 && value[i] == '#') {
value = std::string(value, 0, i);
break;
}
}
// Add the config key
conf.setText(key, trim(value));
}
}
// Add last section
conf_array_.push_back(conf);
conf_map_[section_name].push_back(--conf_array_.end());
}
void ConfigReader::addConfiguration(Configuration config) {
conf_array_.push_back(std::move(config));
conf_map_[conf_array_.back().getName()].push_back(--conf_array_.end());
}
void ConfigReader::clear() {
conf_map_.clear();
conf_array_.clear();
}
bool ConfigReader::hasConfiguration(const std::string& name) const {
return conf_map_.find(name) != conf_map_.end();
}
unsigned int ConfigReader::countConfigurations(const std::string& name) const {
if(!hasConfiguration(name)) {
return 0;
}
return static_cast<unsigned int>(conf_map_.at(name).size());
}
/**
* @warning This will have the file path of the first header section
* @note An empty configuration is returned if no empty section is found
*/
Configuration ConfigReader::getHeaderConfiguration() const {
// Get empty configurations
std::vector<Configuration> configurations = getConfigurations("");
if(configurations.empty()) {
// Use all configurations to find the file name if no empty section was found
configurations = getConfigurations();
std::string file_name;
if(!configurations.empty()) {
file_name = configurations.at(0).getFilePath();
}
return Configuration("", file_name);
}
// Merge all configurations
Configuration header_config = configurations.at(0);
for(auto& config : configurations) {
// NOTE: Merging first configuration again has no effect
header_config.merge(config);
}
return header_config;
}
std::vector<Configuration> ConfigReader::getConfigurations(const std::string& name) const {
if(!hasConfiguration(name)) {
return std::vector<Configuration>();
}
std::vector<Configuration> result;
for(auto& iter : conf_map_.at(name)) {
result.push_back(*iter);
}
return result;
}
std::vector<Configuration> ConfigReader::getConfigurations() const {
return std::vector<Configuration>(conf_array_.begin(), conf_array_.end());
}
<file_sep>/src/objects/SensorCharge.cpp
/**
* @file
* @brief Definition of object for charges in sensor
* @copyright MIT License
*/
#include "SensorCharge.hpp"
using namespace allpix;
SensorCharge::SensorCharge(ROOT::Math::XYZPoint local_position,
ROOT::Math::XYZPoint global_position,
CarrierType type,
unsigned int charge,
double event_time)
: local_position_(std::move(local_position)), global_position_(std::move(global_position)), type_(type), charge_(charge),
event_time_(event_time) {}
ROOT::Math::XYZPoint SensorCharge::getLocalPosition() const {
return local_position_;
}
CarrierType SensorCharge::getType() const {
return type_;
}
unsigned int SensorCharge::getCharge() const {
return charge_;
}
double SensorCharge::getEventTime() const {
return event_time_;
}
ClassImp(SensorCharge)
<file_sep>/tools/tcad_dfise_converter/README.md
## TCAD DF-ISE mesh converter
This code takes as input the .grd and .dat files from TCAD simulations. The .grd file contains the vertex coordinates (3D or 2D) of each mesh node and the .dat file contains the value of each electric field vector component for each mesh node, grouped by model regions (such as silicon bulk or metal contacts). The regions are defined in the .grd file by grouping vertices into edges, faces and, successively, volumes or elements.
A new regular mesh is created by scanning the model volume in regular X, Y and Z steps (not necessarily coinciding with the original mesh nodes) and using a barycentric interpolation method to calculate the respective electric field vector at each new point. The interpolation uses the 4 closest, non-coplanar, neighbouring vertex nodes such that the resulting tetrahedron encloses the query point. For the neighbour search, the software uses the Octree implementation from the paper "Efficient Radius Neighbor Search in Three-dimensional Point Clouds" by <NAME> et al. (see below).
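To sketch the barycentric idea, assuming the four enclosing tetrahedron vertices and their field values have already been found, the interpolation weights can be computed from sub-tetrahedron volumes as below (names are illustrative; this is not the converter's actual code):
```cpp
// Signed volume of a tetrahedron: dot(a-d, cross(b-d, c-d)) / 6
double tet_volume(const Point& a, const Point& b, const Point& c, const Point& d) {
    double ax = a.x - d.x, ay = a.y - d.y, az = a.z - d.z;
    double bx = b.x - d.x, by = b.y - d.y, bz = b.z - d.z;
    double cx = c.x - d.x, cy = c.y - d.y, cz = c.z - d.z;
    return (ax * (by * cz - bz * cy) - ay * (bx * cz - bz * cx) + az * (bx * cy - by * cx)) / 6.0;
}

// Field at query point q inside tetrahedron (v[0..3]) with field values e[0..3]
Point interpolate(const Point& q, const Point v[4], const Point e[4]) {
    // Each weight is the volume of the tetrahedron with one vertex replaced by q
    double w0 = tet_volume(q, v[1], v[2], v[3]);
    double w1 = tet_volume(v[0], q, v[2], v[3]);
    double w2 = tet_volume(v[0], v[1], q, v[3]);
    double w3 = tet_volume(v[0], v[1], v[2], q);
    double total = w0 + w1 + w2 + w3; // full tetrahedron volume for an enclosed point
    return Point((w0 * e[0].x + w1 * e[1].x + w2 * e[2].x + w3 * e[3].x) / total,
                 (w0 * e[0].y + w1 * e[1].y + w2 * e[2].y + w3 * e[3].y) / total,
                 (w0 * e[0].z + w1 * e[1].z + w2 * e[2].z + w3 * e[3].z) / total);
}
```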
The output .init file (with the same prefix as the .grd and .dat files) can be imported into allpix (see the User's Manual for details). The INIT file has a header followed by a list of columns organized as
```bash
node.x node.y node.z e-field.x e-field.y e-field.z
```
#### Features
- TCAD DF-ISE file format reader.
- Fast radius neighbor search for three-dimensional point clouds.
- Barycentric interpolation between non-regular mesh points.
- Several cuts available on the interpolation algorithm variables.
- Interpolated data visualisation tool.
#### Usage
Example .grd and .dat files can be found in the data folder with the example_data prefix.
To run the program the following should be executed from the installation folder:
```bash
bin/tcad_dfise_converter/dfise_converter -f <file_name_prefix> [<options>] [<arguments>]
```
The list with options can be accessed using the -h option.
Default values are assumed for the options not used. These are:
```bash
-R <region> = "bulk"
-r <search radius> = 1 um
-r <radius step> = 0.5 um
-m <max radius> = 10 um
-c <volume cut> = std::numeric_limits<double>::min()
-x,y,z <mesh binning> = 100 (option should be set using -x, -y and -z)
```
The output INIT file will be named with the same file_name_prefix as the .grd and .dat files.
INIT files are read by specifying a file_name containing an .INIT file. The mesh_plotter tool can be used from the installation folder as follows:
```bash
bin/tcad_dfise_converter/mesh_plotter -f <file_name> [<options>] [<arguments>]
```
The list with options and defaults is shown with the -h option. A default value of 100 is used for the binning, but this can be changed.
In a 3D mesh, the plane to be plotted must be identified by using the option -p with argument *xy*, *yz* or *zx*, defaulting to *yz*.
The data to be plotted can be selected with the -d option; the arguments are *ex*, *ey*, *ez* for the vector components or the default value *n* for the norm of the electric field.
#### Octree
[corresponding paper](http://jbehley.github.io/papers/behley2015icra.pdf):
<NAME>, <NAME>, <NAME>. *Efficient Radius Neighbor Search in Three-dimensional Point Clouds*, Proc. of the IEEE International Conference on Robotics and Automation (ICRA), 2015.
Copyright 2015 <NAME>, University of Bonn.
This project is free software made available under the MIT License. For details see the OCTREE LICENSE file.
<file_sep>/src/modules/DefaultDigitizer/DefaultDigitizerModule.cpp
/**
* @file
* @brief Implementation of default digitization module
* @copyright MIT License
*/
#include "DefaultDigitizerModule.hpp"
#include "core/utils/unit.h"
#include "tools/ROOT.h"
#include <TFile.h>
#include <TH1D.h>
#include <TProfile.h>
#include "objects/PixelHit.hpp"
using namespace allpix;
DefaultDigitizerModule::DefaultDigitizerModule(Configuration config,
Messenger* messenger,
std::shared_ptr<Detector> detector)
: Module(config, std::move(detector)), config_(std::move(config)), messenger_(messenger), pixel_message_(nullptr) {
// Require PixelCharge message for single detector
messenger_->bindSingle(this, &DefaultDigitizerModule::pixel_message_, MsgFlags::REQUIRED);
// Seed the random generator with the global seed
random_generator_.seed(getRandomSeed());
// Set defaults for config variables
config_.setDefault<int>("electronics_noise", Units::get(110, "e"));
config_.setDefault<int>("threshold", Units::get(600, "e"));
config_.setDefault<int>("threshold_smearing", Units::get(30, "e"));
config_.setDefault<int>("adc_smearing", Units::get(300, "e"));
config_.setDefault<bool>("output_plots", false);
}
void DefaultDigitizerModule::init() {
if(config_.get<bool>("output_plots")) {
LOG(TRACE) << "Creating output plots";
// Create histograms if needed
h_pxq = new TH1D("pixelcharge", "raw pixel charge;pixel charge [ke];pixels", 100, 0, 10);
h_pxq_noise = new TH1D("pixelcharge_noise_", "pixel charge w/ el. noise;pixel charge [ke];pixels", 100, 0, 10);
h_thr = new TH1D("threshold", "applied threshold; threshold [ke];events", 100, 0, 10);
h_pxq_thr = new TH1D("pixelcharge_threshold_", "pixel charge above threshold;pixel charge [ke];pixels", 100, 0, 10);
h_pxq_adc = new TH1D("pixelcharge_adc", "pixel charge after ADC;pixel charge [ke];pixels", 100, 0, 10);
}
}
void DefaultDigitizerModule::run(unsigned int) {
// Loop through all pixels with charges
std::vector<PixelHit> hits;
for(auto& pixel_charge : pixel_message_->getData()) {
auto pixel = pixel_charge.getPixel();
auto pixel_index = pixel.getIndex();
auto charge = static_cast<double>(pixel_charge.getCharge());
LOG(DEBUG) << "Received pixel " << pixel_index << ", charge " << Units::display(charge, "e");
if(config_.get<bool>("output_plots")) {
h_pxq->Fill(charge / 1e3);
}
// Add electronics noise from Gaussian:
std::normal_distribution<double> el_noise(0, config_.get<unsigned int>("electronics_noise"));
charge += el_noise(random_generator_);
LOG(DEBUG) << "Charge with noise: " << Units::display(charge, "e");
if(config_.get<bool>("output_plots")) {
h_pxq_noise->Fill(charge / 1e3);
}
// FIXME Simulate gain / gain smearing
// Smear the threshold, Gaussian distribution around "threshold" with width "threshold_smearing"
std::normal_distribution<double> thr_smearing(config_.get<unsigned int>("threshold"),
config_.get<unsigned int>("threshold_smearing"));
double threshold = thr_smearing(random_generator_);
if(config_.get<bool>("output_plots")) {
h_thr->Fill(threshold / 1e3);
}
// Discard charges below threshold:
if(charge < threshold) {
LOG(DEBUG) << "Below smeared threshold: " << Units::display(charge, "e") << " < "
<< Units::display(threshold, "e");
continue;
}
LOG(DEBUG) << "Passed threshold: " << Units::display(charge, "e") << " > " << Units::display(threshold, "e");
if(config_.get<bool>("output_plots")) {
h_pxq_thr->Fill(charge / 1e3);
}
// Add ADC smearing:
std::normal_distribution<double> adc_smearing(0, config_.get<unsigned int>("adc_smearing"));
charge += adc_smearing(random_generator_);
if(config_.get<bool>("output_plots")) {
h_pxq_adc->Fill(charge / 1e3);
}
// Add the hit to the hitmap
hits.emplace_back(pixel, 0, charge, &pixel_charge);
// FIXME Simulate analog / digital cross talk
// double crosstalk_neigubor_row = 0.00;
// double crosstalk_neigubor_column = 0.00;
}
// Output summary and update statistics
LOG(INFO) << "Digitized " << hits.size() << " pixel hits";
total_hits_ += hits.size();
if(!hits.empty()) {
// Create and dispatch hit message
auto hits_message = std::make_shared<PixelHitMessage>(std::move(hits), getDetector());
messenger_->dispatchMessage(this, hits_message);
}
}
void DefaultDigitizerModule::finalize() {
if(config_.get<bool>("output_plots")) {
// Write histograms
LOG(TRACE) << "Writing output plots to file";
h_pxq->Write();
h_pxq_noise->Write();
h_thr->Write();
h_pxq_thr->Write();
h_pxq_adc->Write();
}
LOG(INFO) << "Digitized " << total_hits_ << " pixel hits in total";
}
<file_sep>/src/objects/Pixel.cpp
/**
* @file
* @brief Implementation of pixel object
* @copyright MIT License
*/
#include "Pixel.hpp"
using namespace allpix;
Pixel::Pixel(Pixel::Index index,
ROOT::Math::XYZPoint local_center,
ROOT::Math::XYZPoint global_center,
ROOT::Math::XYVector size)
: index_(std::move(index)), local_center_(std::move(local_center)), global_center_(std::move(global_center)),
size_(std::move(size)) {}
Pixel::Index Pixel::getIndex() const {
return index_;
}
ROOT::Math::XYZPoint Pixel::getLocalCenter() const {
return local_center_;
}
ROOT::Math::XYZPoint Pixel::getGlobalCenter() const {
return global_center_;
}
ROOT::Math::XYVector Pixel::getSize() const {
return size_;
}
ClassImp(Pixel)
<file_sep>/doc/usermanual/chapters/faq.tex
\section{Frequently Asked Questions}
\label{sec:faq}
\begin{description}
\item[How do I run a module only for one detector?]\mbox{}\\
This is only possible for detector modules (which are constructed to work on individual detectors). To run it on a single detector one should add a parameter \texttt{name} specifying the name of the detector (as given in the detector configuration file).
\item[How do I run a module only for a specific detector type?]\mbox{}\\ This is only possible for detector modules (which are constructed to work on individual detectors). To run it for a specific type of detectors one should add a parameter \texttt{type} with the type of the detector model (as given in the detector configuration file by the \texttt{model} parameter).
\item[How can I run the exact same type of module with different settings?] This is possible by using the \texttt{input} and \texttt{output} parameters of a module that specialize the location where the messages from the modules are send to and received from. By default both the input and the output of module defaults to the message without a name.
\item[How can I temporarily ignore a module during development?]\mbox{}\\ The section header of a particular module in the configuration file can be replaced by the string \texttt{Ignore}. The section and all of its key/value pairs are then ignored.
\item[Can I get a high verbosity level only for a specific module?]\mbox{}\\ Yes, it is possible to specify verbosity levels and log formats per module. This can be done by adding a \texttt{log\_level} and/or \texttt{log\_format} key to a specific module to replace the parameter in the global configuration sections.
\item[I want to use a detector model with one or several small changes, do I have to create a whole new model for this?] No, models can be specialized in the detector configuration file. This feature is available, for example, to use models with different sensor thicknesses. To specialize a detector model, the key that should be changed in the standard detector model (like \texttt{sensor\_thickness}) should be added as a key to the section of the detector configuration (which is always required to already contain the position, orientation and the base model). Only parameters in the header of detector models can be changed. If support layers should be changed, or new support layers are needed, a new model should be created instead.
\item[How do I access the history of a particular object?]\mbox{}\\ Many objects can include an internal link to related other objects (for example \texttt{getPropagatedCharges} in the \texttt{PixelCharge} object), containing the history of the object (thus the objects that were used to construct the current object). These referenced objects are stored as special ROOT pointers inside the object, which can only be accessed if the referenced object is available in memory. In \apsq this requirement can be automatically fulfilled by also binding the history object in a module, assuming the creating module actually saved the history with the object, which is not strictly required. During analysis, the tree holding the referenced object should be loaded and pointing to the same event entry as the object that requests the reference. If the referenced object cannot be loaded, an exception is required to be thrown by the retrieving method.
\item[How do I access the Monte Carlo truth of a specific PixelHit?]\mbox{}\\ The Monte Carlo truth is just part of the indirect history of a PixelHit. This means that the Monte Carlo truth can be fetched as described in the question above. However, note that there are multiple layers between a PixelHit and its MCParticles, namely the PixelCharge, PropagatedCharges and DepositedCharges. These should all be loaded in memory to make it possible to fetch the history. Because getting the Monte Carlo truth of a PixelHit is quite a common task, a \texttt{getMCParticles} convenience method is available which searches all the layers of the history and throws an exception if any of the intermediate steps is not available or not loaded.
\item[Can I import an electric field from TCAD and use that for simulating propagation?] Yes, the framework includes a tool to convert DF-ISE files from TCAD to an internal format which \apsq can parse. More information about this tool can be found in Section \ref{sec:tcad_electric_field_converter}, instructions to import the generated field are given in Section \ref{sec:module_electric_field}.
\end{description}
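To illustrate the \texttt{name} and \texttt{type} parameters from the first two questions above, a module section can be restricted as in the following sketch (the detector name and model type are example values):
\begin{lstlisting}[language=Ini]
# Run this instance only for the detector named "dut"
[ElectricFieldReader]
name = "dut"

# Run a second instance for all detectors of a specific model type
[ElectricFieldReader]
type = "timepix"
\end{lstlisting}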
\todo{Add more questions}
<file_sep>/doc/usermanual/chapters/quick_start.tex
\section{Quick Start}
This chapter serves as a swift introduction to \apsq for users who prefer to start quickly and learn the details while simulating. The typical user should skip the next paragraphs and continue to Section \ref{sec:introduction} instead.
\apsq is a generic simulation framework for pixel detectors. It provides a modular, flexible and user-friendly structure for the simulation of independent detectors in the geometry. The framework currently relies on the Geant4~\cite{geant4}, ROOT~\cite{root} and Eigen3~\cite{eigen3} libraries, that need to be installed and loaded before using \apsq.
The minimal, default installation can be installed by executing the commands below. More detailed installation instructions are found in Section \ref{sec:installation}.
\begin{verbatim}
$ git clone https://gitlab.cern.ch/simonspa/allpix-squared
$ cd allpix-squared
$ mkdir build && cd build/
$ cmake ..
$ make install
$ cd ..
\end{verbatim}
The binary can then be executed with the example configuration file as follows:
\begin{verbatim}
$ bin/allpix -c etc/example.conf
\end{verbatim}
Hereafter, the example configuration can be copied and adjusted to the needs of the user. This example contains a simple setup of two test detectors. It simulates the whole process from the passage of the beam, the deposition of charges in the detectors, the particle propagation and the conversion of the collected charges to digitized pixel hits. All the generated data is finally stored on disk for later analysis.
After this quick start it is very much recommended to read the other sections in more detail as well. For quickly solving common issues the Frequently Asked Questions in Section \ref{sec:faq} may be particularly useful.
<file_sep>/tools/tcad_dfise_converter/read_dfise.h
#ifndef DFISE_READ_H
#define DFISE_READ_H
#include <map>
#include <vector>
// Sections to read in DF-ISE file
enum class DFSection {
NONE = 0,
IGNORED,
HEADER,
INFO,
REGION,
COORDINATES,
VERTICES,
EDGES,
FACES,
ELEMENTS,
ELECTRIC_FIELD,
VALUES
};
// Simple point class to store data
class Point {
public:
Point() = default;
Point(double px, double py, double pz) : x(px), y(py), z(pz) {}
double x{0}, y{0}, z{0};
};
// Read the grid
std::map<std::string, std::vector<Point>> read_grid(const std::string& file_name);
// Read the electric field
std::map<std::string, std::vector<Point>> read_electric_field(const std::string& file_name);
#endif
<file_sep>/src/modules/DefaultDigitizer/README.md
## DefaultDigitizer
**Maintainer**: <NAME> (<<EMAIL>>)
**Status**: Functional
**Input**: PixelCharge
**Output**: PixelHit
#### Description
Very simple digitization module which translates the collected charges into a digitized signal proportional to the input charge. It simulates noise contributions from the readout electronics as Gaussian noise and allows for a configurable threshold.
In detail, the following steps are performed for every pixel charge (a condensed sketch follows the list):
* Gaussian noise is added to the input charge value in order to simulate input noise to the preamplifier circuit
* A charge threshold is applied. Only if the threshold is surpassed is the pixel accounted for; all values below the threshold are discarded. The actually applied threshold is smeared with a Gaussian distribution on an event-by-event basis, allowing for the simulation of fluctuations of the threshold level.
* An inaccuracy of the ADC is simulated using an additional Gaussian smearing, this allows to take ADC noise into account.
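A condensed sketch of this chain for a single pixel (with simplified names; `gaussian(mean, sigma)` stands for a draw from a normal distribution, mirroring the module implementation):
```cpp
// Illustrative sketch of the per-pixel digitization chain
charge += gaussian(0, electronics_noise);                        // preamplifier input noise
double threshold = gaussian(threshold_mean, threshold_smearing); // smeared threshold
if(charge < threshold) {
    return; // pixel discarded, no hit produced
}
charge += gaussian(0, adc_smearing);                             // ADC inaccuracy
```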
#### Parameters
* `electronics_noise` : Standard deviation of the Gaussian noise in the electronics (before applying the threshold). Defaults to 110 electrons.
* `threshold` : Threshold for considering the readout charge as a hit. Defaults to 600 electrons.
* `threshold_smearing` : Standard deviation of the Gaussian uncertainty in the threshold charge value. Defaults to 30 electrons.
* `adc_smearing` : Standard deviation of the Gaussian noise in the ADC conversion (after applying the threshold). Defaults to 300 electrons.
* `output_plots` : Enables output histograms to be generated from the data in every step (slows down the simulation considerably). Disabled by default.
#### Usage
The default configuration is equal to the following
```ini
[DefaultDigitizer]
electronics_noise = 110e
threshold = 600e
threshold_smearing = 30e
adc_smearing = 300e
```
<file_sep>/src/modules/LCIOWriter/LCIOWriterModule.cpp
/**
* @file
* @brief Implementation of [LCIOWriter] module
* @copyright MIT License
*/
#include "LCIOWriterModule.hpp"
#include <string>
#include <utility>
#include <vector>
#include "core/messenger/Messenger.hpp"
#include "core/utils/log.h"
#include <IMPL/LCCollectionVec.h>
#include <IMPL/LCEventImpl.h>
#include <IMPL/LCRunHeaderImpl.h>
#include <IMPL/TrackerDataImpl.h>
#include <IO/LCWriter.h>
#include <IOIMPL/LCFactory.h>
#include <UTIL/CellIDEncoder.h>
#include <lcio.h>
using namespace allpix;
using namespace lcio;
LCIOWriterModule::LCIOWriterModule(Configuration config, Messenger* messenger, GeometryManager* geo)
: Module(config), config_(std::move(config)) {
// Bind pixel hits message
messenger->bindMulti(this, &LCIOWriterModule::pixel_messages_, MsgFlags::REQUIRED);
// get all detector names and assign id.
std::vector<std::shared_ptr<Detector>> detectors = geo->getDetectors();
unsigned int i = 0;
for(const auto& det : detectors) {
detectorIDs_[det->getName()] = i;
LOG(DEBUG) << det->getName() << " has ID " << detectorIDs_[det->getName()];
i++;
}
pixelType_ = config_.get<int>("pixel_type", 2);
DetectorName_ = config_.get<std::string>("detector_name", "EUTelescope");
OutputCollectionName_ = config_.get<std::string>("output_collection_name", "zsdata_m26");
// Open LCIO file and write run header
lcWriter_ = LCFactory::getInstance()->createLCWriter();
lcWriter_->open(config_.get<std::string>("file_name", "output.slcio"), LCIO::WRITE_NEW);
auto run = std::make_unique<LCRunHeaderImpl>();
run->setRunNumber(1);
run->setDetectorName(DetectorName_);
lcWriter_->writeRunHeader(run.get());
}
void LCIOWriterModule::run(unsigned int eventNb) {
auto evt = std::make_unique<LCEventImpl>(); // create the event
evt->setRunNumber(1);
evt->setEventNumber(static_cast<int>(eventNb)); // set the event attributes
evt->parameters().setValue("EventType", 2);
// Prepare charge vectors
std::vector<std::vector<float>> charges;
for(unsigned int i = 0; i < detectorIDs_.size(); i++) {
std::vector<float> charge;
charges.push_back(charge);
}
// Receive all pixel messages, fill charge vectors
for(const auto& hit_msg : pixel_messages_) {
LOG(DEBUG) << hit_msg->getDetector()->getName();
for(const auto& hitdata : hit_msg->getData()) {
LOG(DEBUG) << "X: " << hitdata.getPixel().getIndex().x() << ", Y:" << hitdata.getPixel().getIndex().y()
<< ", Signal: " << hitdata.getSignal();
unsigned int detectorID = detectorIDs_[hit_msg->getDetector()->getName()];
switch(pixelType_) {
case 1: // EUTelSimpleSparsePixel
charges[detectorID].push_back(static_cast<float>(hitdata.getPixel().getIndex().x())); // x
charges[detectorID].push_back(static_cast<float>(hitdata.getPixel().getIndex().y())); // y
charges[detectorID].push_back(static_cast<float>(hitdata.getSignal())); // signal
break;
case 2: // EUTelGenericSparsePixel
default: // EUTelGenericSparsePixel is default
charges[detectorID].push_back(static_cast<float>(hitdata.getPixel().getIndex().x())); // x
charges[detectorID].push_back(static_cast<float>(hitdata.getPixel().getIndex().y())); // y
charges[detectorID].push_back(static_cast<float>(hitdata.getSignal())); // signal
charges[detectorID].push_back(static_cast<float>(0)); // time
break;
case 5: // EUTelTimepix3SparsePixel
charges[detectorID].push_back(static_cast<float>(hitdata.getPixel().getIndex().x())); // x
charges[detectorID].push_back(static_cast<float>(hitdata.getPixel().getIndex().y())); // y
charges[detectorID].push_back(static_cast<float>(hitdata.getSignal())); // signal
charges[detectorID].push_back(static_cast<float>(0)); // time
charges[detectorID].push_back(static_cast<float>(0)); // time
charges[detectorID].push_back(static_cast<float>(0)); // time
charges[detectorID].push_back(static_cast<float>(0)); // time
break;
}
}
}
// Prepare hitvector
LCCollectionVec* hitVec = new LCCollectionVec(LCIO::TRACKERDATA);
// Fill hitvector with event data
for(unsigned int detectorID = 0; detectorID < detectorIDs_.size(); detectorID++) {
auto hit = new TrackerDataImpl();
CellIDEncoder<TrackerDataImpl> sparseDataEncoder("sensorID:7,sparsePixelType:5", hitVec);
sparseDataEncoder["sensorID"] = detectorID;
sparseDataEncoder["sparsePixelType"] = pixelType_;
sparseDataEncoder.setCellID(hit);
hit->setChargeValues(charges[detectorID]);
hitVec->push_back(hit);
}
// Add collection to event and write event to LCIO file
evt->addCollection(hitVec, OutputCollectionName_); // add the collection with a name
lcWriter_->writeEvent(evt.get()); // write the event to the file
}
void LCIOWriterModule::finalize() {
lcWriter_->close();
}
LCIOWriterModule::~LCIOWriterModule() {
delete lcWriter_;
}
<file_sep>/doc/usermanual/chapters/acknowledgements.tex
\section{Acknowledgments}
\begin{itemize}
\item \textbf{<NAME>}, \textbf{<NAME>}, \textbf{<NAME>} and all other contributors to the first version of AllPix, for their earlier work that inspired \apsq.
\item \textbf{<NAME>} for interesting discussion, his experiments with TGeo and his help implementing a visualization module.
\item \textbf{<NAME>} for contributing his earlier work on simulating charge propagation and providing help on simulations with electric fields.
\item \textbf{<NAME>} for his help setting up several software tools like continuous integration and automatic static-code analysis.
\item \textbf{<NAME>} for comments on the documentation and his help with porting the detector models from the original AllPix.
\item \textbf{<NAME>} for his contributions to the code and helpful discussions on different matters concerning the simulation process.
\item \textbf{<NAME>} for his help in implementing a tool to interpolate electric fields from a TCAD mesh format to the grid used in \apsq.
\end{itemize}
We would also like to thank all others not listed here, that have contributed to the source code, provided input or suggested improvements.
<file_sep>/src/objects/objects.h
/**
* @file
* @brief File including all current objects
* @copyright MIT License
*/
#include "DepositedCharge.hpp"
#include "MCParticle.hpp"
#include "Pixel.hpp"
#include "PixelCharge.hpp"
#include "PixelHit.hpp"
#include "PropagatedCharge.hpp"
namespace allpix {
/**
* @brief Tuple containing all objects
*/
using OBJECTS = std::tuple<MCParticle, DepositedCharge, PropagatedCharge, PixelCharge, PixelHit>;
}
<file_sep>/doc/usermanual/appendices/design_notes.tex
\section{Design Development Notes}
This chapter serves as a short overview of the design goals of \apsq and the related implementation choices. Note that the \apsq API is currently in a very early development stage and any implementation detail here is subject to change.
\subsection{Goals}
\apsq is developed with a few goals in mind. These are (listed from most important to least important) the following:
\begin{enumerate}
\item Reflects the physics
\begin{itemize}
\item Event based - event here refers to particle(s) passage through the setup
\item Detectors are treated as independent objects for particles to pass through
\item All of the information must be contained at the very end of processing an event (sequential events)
\end{itemize}
\item Ease of use (user-friendly)
\begin{itemize}
\item Simplicity for module implementation without knowing details of the framework
\item Simple intuitive configuration and execution ("does what you expect")
\end{itemize}
\item Modularity
\begin{itemize}
\item Independent modules: write independent code without dependency on anything other but the core
\item Allow very simple but also advanced user configurations
\end{itemize}
\item Flexibility
\begin{itemize}
\item Allow to combine different detectors, different modules
\item Limit flexibility for the sake of simplicity and ease of use (higher on the list)
\end{itemize}
\end{enumerate}
\subsection{Setup}
\apsq consists of four major components listed below:
\begin{enumerate}
\item \textbf{Core}: The core contains the internal logic to initiate all modules and to run the event sequence. The core should keep its dependencies to a minimum and remain separated from the components below. More info later.
\item \textbf{Modules}: A set of methods that execute a subset of the simulation process. They are separate libraries, loaded dynamically by the core. Possible modules include one to (1) build the geometry, (2) deposit the charges in the detector, (3) propagate the charges in the detector, (4) transfer the charges to the readout chip (5) digitize the readings into a result. Furthermore there should be a set of file writer modules and (possibly) visualization modules to capture intermediate and final output. Finally there should be file input modules to simulate only a part instead of the full process.
\item \textbf{Messages}: Messages are pointers to the data passed around between the modules. Modules can listen and bind to messages they wish to receive. A message is passed around by type, but it is possible to give a message a particular name to allow modules to handle different message names (the default name is empty and dispatches the message to all modules listening to that specific type of message). Messages are meant to be read-only and a copy of the data should be made if a module developer wishes to change the data. Exact definition of the core messages is not yet provided (current ideas follow later).
\item \textbf{Tools}: \apsq will provide a simple interface to several widely used 'tools' a module wants to use. A possible example would be a Runge-Kutta solver.
\end{enumerate}
Finally the implementation provides an executable which integrates the core, messages and tools together with the command-line handling into a single process.
\subsection{Core}
The core is structured around one main object (AllPix) and a set of managers. Besides this it contains a few utilities for logging, some string operations and a set of exceptions that could happen during the execution of a single event. A manager is a unique object bound to AllPix that should not be copyable. The set of managers and their uses follows hereafter.
\begin{enumerate}
\item \textbf{ConfigManager}: Loads the main configuration files and provides a set of configurations (sections) that should be converted to a set of instantiations later. The config manager should be able to provide the set of configurations in the order in which they are defined in the configuration file and also allow fetching a configuration by header. The structure of the current configuration is inspired by a simple version of \href{https://github.com/toml-lang/toml}{TOML}. The most important rules are:
\begin{itemize}
\item Every configuration start with a bracket enclosed section like \textit{[section]}.
\item Multiple sections with the same name are allowed (as long as they lead to unique instances, see below)
\item Every section contain a set of key value pairs like \textit{key = value}
\item It is up to the module developers to determine the appropriate type of the value, incorrect data should lead to an exception
\item Supported values include strings, floating-point numbers (float, double) and integers (short, int, long, long long, etc.) as well as arrays of these types
\item An array is separated by either a space or a comma
\item Extra whitespace is ignored
\item Comment lines starting with either a semicolon or a hash are ignored
\item There are a set of special key-value pairs handled by the instantiation logic instead of the module itself (see below)
\item Configurations are fixed during one run of \apsq
\item Inheriting of values from similar configurations is not yet defined... (possibly we want to inherit higher level, see part about module identifier below)
\end{itemize}
\item \textbf{ModuleManager}: Instantiates the modules from the provided configuration. Fetches the linearly defined configurations in the config file from the config manager. The manager dynamically loads the modules from the name specified in the configuration header. The modules should use a factory builder that creates the appropriate amount of instances for the setup. Every instantiation has a single unique identifier that may only appear once in the configuration. An identifier consists of an optional name and type (see below) together with a potential input and output name that defaults to none, but allows running two modules with the same identifier using different input or output names for their messages. There are two important factories:
\begin{itemize}
\item \textbf{UniqueModuleFactory}: Creates a single instance of the module with the section header name as unique identifier. These modules should only appear once in the configuration file unless a separate input or output name is specified.
\item \textbf{DetectorModuleFactory}: Creates a separate instance for every detector in the setup. Special configuration key/value pairs 'name' and 'type' can be provided that only build an instance for the provided detectors. A name has a higher rank than a type and should replace any comparable type instance. Creating two instances with the same identifier and the same priority is not allowed (unless their input / output name differs).
\end{itemize}
\item \textbf{GeometryManager}: Provides access to the geometry description of the detector. This needs to be provided as a simple internally defined detector description which is used by most modules. Moreover, the geometry manager should provide access to a detailed detector description provider. The number of different descriptions is not defined, but should at the minimum include a valid Geant4 description needed for the deposition of the charge carriers. The construction of the data for the geometry manager is done by the geometry builder module to allow the core to be independent from Geant4 etc.
\item \textbf{Messenger}: Facilitates the message passing between the modules. Messages should be the only way modules communicate with each other (besides their concurrent access of data through the managers). On instantiation modules should register for the messages they wish to receive. They can either (1) register a callback to be called on message dispatching, (2) bind the message to a variable in the class or (3) add the message to a list of messages in the module. Messages should only be dispatched in the run() method of every module. The messenger should take care to dispatch messages to the right detector instances and match the input and output names that are potentially provided (a short binding sketch is shown after this list).
\end{enumerate}
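As a concrete sketch of the registration options mentioned for the Messenger, a module could bind messages in its constructor as follows (\texttt{MyModule} is a placeholder name; both binding methods already appear in the current code base):
\begin{lstlisting}[language=C++]
// Bind a single required message to a member variable...
messenger_->bindSingle(this, &MyModule::pixel_message_, MsgFlags::REQUIRED);
// ...or collect all matching messages (e.g. from several detectors) in a vector
messenger_->bindMulti(this, &MyModule::pixel_messages_, MsgFlags::REQUIRED);
\end{lstlisting}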
\subsection{Messages}
A few ideas for messages are the following:
\begin{itemize}
\item CarrierDepositionMessage: with vector of carriers located at a certain local x,y,z position in the detector
\item CarrierCollectionMessage: with vector of carriers located at at a local x,y position on the bottom of the detector
\item CarrierTransferMessage: with vector of carriers located at a certain pixel
\item PixelHitMessage: with vector of counts of hits per pixel in a certain detector after digitizing
\end{itemize}
| 5ea1cbcd268c979fdd39005fba7eb74ecb4083eb | [
"Shell",
"CMake",
"Markdown",
"TeX",
"C++"
] | 65 | Shell | mateus-vicente/allpix-squared | bd958c058e6288ca5ff5a77abf8028bac696c8ed | 8cf7cf09c2d787c0f6a97457be9401042efe04d7 |
refs/heads/master | <repo_name>richimf/TutosAndroid<file_sep>/OperacionesSun/Conversiones/app/src/main/java/com/richie/conversiones/QuotientModule.java
package com.richie.conversiones;
/**
* Created by richie on 9/7/17.
*/
public class QuotientModule {
// (q,r) { q = a div d is the quotient, r = a mod d is the remainder }
private int module;
private int quotient;
public QuotientModule(int quotient, int module) {
this.module = module;
this.quotient = quotient;
}
public int getModule() {
return module;
}
public void setModule(int module) {
this.module = module;
}
public int getQuotient() {
return quotient;
}
public void setQuotient(int quotient) {
this.quotient = quotient;
}
}
<file_sep>/README.md
# TutosAndroid
Code for several Android examples
#Components that make up an App
- Activities
- Fragments
- Broadcast Receivers
- Content Providers
- Services
###Activity
An activity can be thought of as the "window". This is the main component of an App's graphical user interface.
###View
Views. Out of the box, Android provides multiple basic controls such as buttons, lists, images, etc.
[View class reference](https://developer.android.com/reference/android/view/View)
### Content Provider
It is a mechanism for sharing data between Apps. Our app will be able to access another app's data through its defined `content provider`s. This mechanism is used by many of the standard applications of an Android device, such as the **contact list**, the SMS application, or the calendar/agenda (see the query sketch below).
To add a content provider to our application we will have to:
- Create a new class that extends the Android `ContentProvider` class.
- Declare the new content provider in our `AndroidManifest.xml` file
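On the consumer side, a provider is queried through a `ContentResolver`. A minimal sketch reading the system contacts provider (assumes the `READ_CONTACTS` permission is declared in the manifest):
```Java
import android.database.Cursor;
import android.provider.ContactsContract;

// Inside an Activity: iterate over the display names exposed by the Contacts provider
Cursor cursor = getContentResolver().query(
        ContactsContract.Contacts.CONTENT_URI, null, null, null, null);
while (cursor != null && cursor.moveToNext()) {
    String name = cursor.getString(
            cursor.getColumnIndex(ContactsContract.Contacts.DISPLAY_NAME));
    // ...use the contact name...
}
if (cursor != null) cursor.close();
```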
### Broadcast Receiver
It is a component intended to **detect and react to certain messages** or global events generated by the system, for example **Low Battery** or SD card inserted, or messages generated by other Apps (`Intents`). These are not addressed to one specific App, but to all Apps listening for that event.
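A minimal sketch of such a receiver for the system's low-battery broadcast, registered from code (it could equally be declared in `AndroidManifest.xml`):
```Java
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.util.Log;

public class BatteryLowReceiver extends BroadcastReceiver {
    @Override
    public void onReceive(Context context, Intent intent) {
        // Reacts to the global ACTION_BATTERY_LOW event
        Log.i("BatteryLowReceiver", "Battery is low!");
    }
}

// Registering it from an Activity:
// registerReceiver(new BatteryLowReceiver(), new IntentFilter(Intent.ACTION_BATTERY_LOW));
```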
### Intents
An `Intent` is the basic communication element between the different components of Android. It can be understood as the "messages" or "requests" that are sent between the different components of an App or between different Apps, as in the sketch below.
With an Intent you can:
- Show a new Activity
- Start a `service`
- Send a `broadcast` message
- Start another App
- etc...
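For example, launching a second screen with an explicit Intent; here `DetailActivity` is a hypothetical Activity of our own app, and data can be attached as extras:
```Java
import android.content.Intent;

// Inside an Activity: start DetailActivity and pass it a payload
Intent intent = new Intent(this, DetailActivity.class);
intent.putExtra("itemId", 42); // extras travel with the Intent
startActivity(intent);
```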
###Service
Services are components without a graphical interface that **run in the background** (long-running operations in the background, even if the user switches to another App). For example: updating data, firing notifications, **playing music**, opening files, etc...
A service can take one of two forms:
- **Started**, when an Activity starts it with `startService()`, e.g. uploading a file; once the upload is finished, the service ends.
- **Bound**, when the App binds itself to the service through `bindService()`. A bound service offers a *client-server* interface that lets components interact with the service, send requests, obtain results, and even do so across processes with inter-process communication (IPC). A bound service only runs while another app component is bound to it. Multiple components can bind to the service, but when all of them unbind, the service dies.
**Should you use a service or a thread?**
A service is simply a component that can run in the background, even when the user is not interacting with your application. Therefore, you should create a service only if that is what you need.
For example, if you want to play music, but only while your activity is running, you can create a thread in `onCreate()`, start running it in `onStart()`, and then stop it in `onStop()`. Also consider using **AsyncTask or HandlerThread** instead of the traditional Thread class.
Remember that if you use a *service*, it runs on your application's main thread by default, so you should create a new thread inside the service. To create a service, we subclass `Service` and override some of its callback methods (see the skeleton below)...
- `onStartCommand()`: The system calls this when an Activity requests that the service be started via `startService()`. Once this method executes, the service is started and can run in the background indefinitely. If you implement this, **it is your responsibility to stop the service** when its work is done, by calling `stopSelf()` or `stopService()`.
- `onBind()`: The system calls this method when another component wants to bind to the service via `bindService()`.
[More about services](https://developer.android.com/guide/components/services?hl=es)
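A minimal started-service skeleton along those lines; `MusicService` is a hypothetical name, the work is pushed onto its own thread, and `onBind()` returns null because no binding is offered:
```Java
import android.app.Service;
import android.content.Intent;
import android.os.IBinder;

public class MusicService extends Service {
    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                // ...long-running work goes here, off the main thread...
                stopSelf(); // our responsibility: stop the service when finished
            }
        }).start();
        return START_STICKY;
    }

    @Override
    public IBinder onBind(Intent intent) {
        return null; // started service only, no binding offered
    }
}
```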
###Preferences
Each preference will be stored as a key-value pair, that is, each one will be made up of a unique identifier plus its value. We have `SharedPreferences` and `PreferenceActivity`.
####SharedPreferences (shared preferences)
Moreover, unlike SQLite, the data is not stored in a binary database file, but in **XML** files (/data/data/.../shared_prefs/MisPreferencias.xml).
We will focus on the `SharedPreferences` class and obtain our data with `getSharedPreferences()`; this method needs the identifier and the access mode. The access mode indicates which applications will have access to this collection of preferences and which operations they are allowed to perform on it.
- `MODE_PRIVATE`: Only our app has access to these preferences.
- `MODE_WORLD_READABLE` [deprecated]: All apps can read the preferences, but only our app can modify them.
- `MODE_WORLD_WRITEABLE` [deprecated]: All applications can read and modify these preferences.
E.g., getting our preferences:
```Java
SharedPreferences prefs = getSharedPreferences("MisPreferencias",Context.MODE_PRIVATE);
```
We can get, insert, or modify preferences using the get or put methods corresponding to each preference's data type:
```Java
SharedPreferences prefs =
getSharedPreferences("MisPreferencias",Context.MODE_PRIVATE);
String correo = prefs.getString("email", "<EMAIL>");
```
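Writing goes through a `SharedPreferences.Editor` obtained from the same object:
```Java
SharedPreferences prefs =
    getSharedPreferences("MisPreferencias", Context.MODE_PRIVATE);
SharedPreferences.Editor editor = prefs.edit();
editor.putString("email", "<EMAIL>"); // queue the change
editor.apply(); // persist asynchronously (commit() is the synchronous variant)
```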
####PreferenceActivity
A `Preference Activity` is a screen where we can select the preferences of an App. We will define this screen through an XML file, just like any layout, although it will live in `/res/xml`. The main container of our screen will be the `<PreferenceScreen>` element. This element represents the options screen itself, and inside it we include the remaining elements. This screen can have categories, which we specify with `<PreferenceCategory>`; each category will hold all of its options.
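The classic way to show that XML screen is subclassing `PreferenceActivity` (deprecated in modern Android in favour of the preference libraries); `R.xml.preferencias` is an assumed resource name here:
```Java
import android.os.Bundle;
import android.preference.PreferenceActivity;

public class SettingsActivity extends PreferenceActivity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Inflates the screen defined in /res/xml/preferencias.xml
        addPreferencesFromResource(R.xml.preferencias);
    }
}
```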
### SOAP Services
Android does NOT natively provide support for SOAP, but there is a library that allows access to this kind of Web service: [ksoap2-android](http://simpligility.github.io/ksoap2-android/index.html).
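A rough sketch of a call with ksoap2-android; the namespace, method name and URLs below are placeholders, and in real code `transport.call()` is wrapped in a try/catch and executed off the main thread:
```Java
import org.ksoap2.SoapEnvelope;
import org.ksoap2.serialization.SoapObject;
import org.ksoap2.serialization.SoapSerializationEnvelope;
import org.ksoap2.transport.HttpTransportSE;

SoapObject request = new SoapObject("http://example.com/", "GetData");
request.addProperty("id", 1);

SoapSerializationEnvelope envelope =
        new SoapSerializationEnvelope(SoapEnvelope.VER11);
envelope.setOutputSoapObject(request);

HttpTransportSE transport = new HttpTransportSE("http://example.com/service");
transport.call("http://example.com/GetData", envelope); // blocking network call
Object response = envelope.getResponse();
```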
### Background Tasks, Thread and AsyncTask.
####Thread and AsyncTask
All the components of an App (Activities, Services, Broadcast Receivers) run on the main thread, the `Main Thread` or `GUI thread`. This is where all the operations that manage the application's user interface are executed.
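A minimal `AsyncTask` sketch: `doInBackground()` runs on a worker thread while `onPostExecute()` returns to the main thread to touch the UI (the URL below is a placeholder):
```Java
import android.os.AsyncTask;
import android.util.Log;

public class DownloadTask extends AsyncTask<String, Void, String> {
    @Override
    protected String doInBackground(String... urls) {
        // Worker thread: never touch views here
        return "result for " + urls[0];
    }

    @Override
    protected void onPostExecute(String result) {
        // Main thread: safe to update the UI
        Log.i("DownloadTask", result);
    }
}

// Usage from an Activity: new DownloadTask().execute("http://example.com");
```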
<file_sep>/RetrofitTuto/app/src/main/java/com/example/ricardomontesinos/retrofittuto/models/Catalog.java
package com.example.ricardomontesinos.retrofittuto.models;
import java.util.List;
/**
* Created by ricardo.montesinos on 9/13/16.
*/
public class Catalog {
public List<Course> courses;
// public List<Track> tracks;
}
<file_sep>/TabsAndroid/app/src/main/java/com/example/ricardomontesinos/tabsandroid/MainActivity.java
package com.example.ricardomontesinos.tabsandroid;
import android.content.res.Resources;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.widget.TabHost;
public class MainActivity extends AppCompatActivity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
        //to access the app resources
Resources res = getResources();
TabHost tabs = (TabHost)findViewById(android.R.id.tabhost);
tabs.setup();
        //we create a TabHost.TabSpec object for each tab we want to add, via the newTabSpec() method
        //passing an identifying tag "mitab1" as a parameter
//tab 1
TabHost.TabSpec spec = tabs.newTabSpec("mitab1");
spec.setContent(R.id.tab1);
spec.setIndicator("",res.getDrawable(android.R.drawable.ic_btn_speak_now));
tabs.addTab(spec);
        //tab 2, reusing the spec variable
spec = tabs.newTabSpec("mitab2");
spec.setContent(R.id.tab2);
spec.setIndicator("TAB2", res.getDrawable(android.R.drawable.ic_dialog_map));
tabs.addTab(spec);
tabs.setCurrentTab(0);
        //TabHost events
        //Notifies us when we switch tabs
tabs.setOnTabChangedListener(new TabHost.OnTabChangeListener() {
@Override
public void onTabChanged(String tabId) {
                // we receive the tab's identifying tag, not its ID
Log.i("TABS","Pulsada la pestaña: "+tabId);
}
});
}
}
<file_sep>/RetrofitTuto/app/src/main/java/com/example/ricardomontesinos/retrofittuto/models/Instructor.java
package com.example.ricardomontesinos.retrofittuto.models;
/**
* Created by ricardo.montesinos on 9/13/16.
*/
public class Instructor {
public String name;
public String bio;
}
| 7d0dfe9d134c625b661164a201d984d283b27109 | [
"Java",
"Markdown"
] | 5 | Java | richimf/TutosAndroid | abf55a96a1bfa0e7bfd10a0e64bcdd801607ec10 | 1025dbe901a06f3716c535d8ecc2ca8cdcf4c5ba |
refs/heads/master | <file_sep># onekue.github.io | 57bd7671c5c6f284f145e824f94a740249c8e193 | [
"Markdown"
] | 1 | Markdown | onekue/onekue.github.io | 767f534e23004f007cedfa7d08c4b8e0fd775dd8 | a6e6bd94ee505ab1c74bd38a58929c6b70326116 |
refs/heads/master | <file_sep># cording: utf-8
from si_cfd import SiData
from si_cfd import CalDiff
import matplotlib.pyplot as plt
def plot_data(x, qn, time):
plt.plot(x, qn, marker='o', lw=2, label=f'time={time}')
plt.figure(figsize=(7, 7), dpi=100)
plt.rcParams["font.size"] = 10
data = []
x = []
start_x = 0
end_x = 2.0
dx = 0.01
dt = 0.01
i = 0
while start_x + dx * i <= end_x:
x.append(start_x + dx * i)
if x[i] <= 1.0:
data.append(1)
else:
data.append(0)
i = i + 1
time = 0
end_time = 0.3
q = SiData()
#cal = CalDiff("FTCS")
#cal = CalDiff("Lax_diff")
cal = CalDiff("advance_diff")
#cal = CalDiff("Lax_Wendroff_diff")
q.set_data(dt, dx, data)
plot_data(x, q.data, time)
n = 0
plot_dn = 2 * (0.05 / dt)
while time <= end_time:
n = n + 1
time = round(n * dt, 1)
qn = (cal.Dt(q) + cal.Dx(q, 1)).solve()
q.update(qn, time)
q.boundary()
if n % plot_dn == 0:
plot_data(x, q.data, time)
#time = round(n*dt,1)
# figure post-processing
plt.xlim([0, 2])
# plt.ylim([0,1.4])
plt.xlabel('x')
plt.ylabel('q')
plt.legend(loc="lower left")
plt.show()
<file_sep>import numpy as np
class SiSolData():
    An=np.array([]) # coefficient matrix of the unknown variables
    Ab=np.zeros([]) # other (constant-term) coefficients
def __init__(self, dataN):
self.An=np.zeros((dataN,dataN))
self.Ab=np.zeros(dataN)
def __add__(self, other):
self.An = self.An + other.An
self.Ab = self.Ab + other.Ab
return self
def solve(self):
invAn = np.linalg.inv(self.An)
Ab_T = np.array([self.Ab]).T
result = np.dot(invAn, -Ab_T)
#print ("invAn : \n" ,invAn)
#print ("Ab : \n", self.Ab)
#print ("Ab_T : \n" ,-Ab_T)
#print ("result : \n", np.dot(invAn,-Ab_T))
return result.T[0]
class CalDiff():
###Dt
def forward_diff_Dt(self, si_data):
n=si_data.N
dt=si_data.dt
soldata = SiSolData(n)
soldata.An[0][0]=1
soldata.Ab[0]=1
soldata.An[n-1][n-1]=1
soldata.Ab[n-1]=1
for i in range(1,n-1):
soldata.An[i][i]=1.0/dt
soldata.Ab[i]=(-(1.0/dt)*si_data.data[i])
return soldata
def Lax_diff_Dt(self, si_data):
n=si_data.N
dt=si_data.dt
soldata = SiSolData(n)
soldata.An[0][0]=1
soldata.Ab[0]=1
soldata.An[n-1][n-1]=1
soldata.Ab[n-1]=1
for i in range(1,n-1):
soldata.An[i][i]=1.0/dt
soldata.Ab[i]=(-(1.0/dt)*(si_data.data[i+1]+si_data.data[i-1])/2.0)
return soldata
def Dt(self, sol_name, si_data):
if sol_name == "forward_diff" :
return self.forward_diff_Dt(si_data)
if sol_name == "Lax_diff" :
return self.Lax_diff_Dt(si_data)
if sol_name == "Lax_Wendroff_diff":
return self.forward_diff_Dt(si_data)
else :
print("The func type name '{0}' of Dt is not defined.".format(sol_name))
###Dx
def centor_diff_Dx(self, si_data, coef):
n=si_data.N
soldata = SiSolData(n)
soldata.An[0][0]=0
soldata.Ab[0]=si_data.data[0]
soldata.An[n-1][n-1]=0
soldata.Ab[n-1]=si_data.data[n-1]
for i in range(1,n-1):
soldata.Ab[i]=coef*(si_data.data[i+1]-si_data.data[i-1])/(2.0*si_data.dx)
return soldata
def advance_diff_Dx(self, si_data, coef):
n=si_data.N
coef_ab=abs(coef)
soldata = SiSolData(n)
soldata.An[0][0]=0
soldata.Ab[0]=si_data.data[0]
soldata.An[n-1][n-1]=0
soldata.Ab[n-1]=si_data.data[n-1]
for i in range(1,n-1):
#soldata.Ab[i]=coef*(si_data.data[i]-si_data.data[i-1])/(si_data.dx)
soldata.Ab[i]=(coef + coef_ab)*0.5*(si_data.data[i]-si_data.data[i-1])/(si_data.dx) + (coef - coef_ab)*0.5*(si_data.data[i+1]-si_data.data[i])/(si_data.dx)
return soldata
def Lax_Wendroff_diff_Dx(self, si_data, coef):
n=si_data.N
dt=si_data.dt
soldata = SiSolData(n)
soldata.An[0][0]=0
soldata.Ab[0]=si_data.data[0]
soldata.An[n-1][n-1]=0
soldata.Ab[n-1]=si_data.data[n-1]
for i in range(1,n-1):
soldata.Ab[i]=coef*(si_data.data[i+1]-si_data.data[i-1])/(2.0*si_data.dx) - (coef**2)*dt*(si_data.data[i+1]-2.0*si_data.data[i] + si_data.data[i-1])/(2.0*si_data.dx**2)
return soldata
#cal.Dx(sol_name, si_data, coef):
def Dx(self, *args):
sol_name = args[0]
si_data = args[1]
try:
coef = args[2]
except:
coef = 1
if sol_name == "centor_diff" :
return self.centor_diff_Dx(si_data, coef)
elif sol_name == "advance_diff" :
return self.advance_diff_Dx(si_data,coef)
elif sol_name == "Lax_diff" :
return self.centor_diff_Dx(si_data, coef)
elif sol_name == "Lax_Wendroff_diff":
return self.Lax_Wendroff_diff_Dx(si_data, coef)
else :
print("The func type name '{0}' of Dx is not defined.".format(sol_name))
class SiData():
#data=np.array([])
#old_data=np.array([])
def set_data(self, delta_t, delta_x, data):
self.dt = delta_t
self.dx = delta_x
self.N = len(data)
self.data = np.array([])
self.time = 0.0
for i in range(0, self.N):
self.data=np.append(self.data, data[i])
def Dt(self, sol_name):
return CalDiff().Dt(sol_name, self)
    #args is the coefficient c applied to the specified physical quantity
def Dx(self, sol_name):
return CalDiff().Dx(sol_name, self)
def boundary(self):
n=self.N
self.data[0]=self.data_old[0]
self.data[n-1]=self.data_old[n-1]
def update(self, data, time):
self.data_old = self.data
self.data = data
self.time = time
def test(self, *args):
for element in args :
print("test: {0}\n".format(element))
print(args)
print(args[0])
test_data = data
print(test_data)
data=[5,6,7]
q=SiData()
#q.test(1)
#q.test(2,3,4)
#q.test(1,2,3,4,data)
#initialization
#data=[1,1,0,0]
#dx = 0.1
#dt = 1
#time = dt
#q=SiData()
#q.set_data(dt, dx, data)
#print("time : ",q.time,"data:",q.data)
#computation
#qn = (q.Dt("forward_diff") + q.Dx("centor_diff")).solve() #want to be able to change the coefficient of Dx
#q.update(qn,time)
#q.boundary()
#show results
#print("time : ",q.time,"data:",q.data)
| 08d6815d4d4eddfd01f69836dd347423b83e7a44 | [
"Python"
] | 2 | Python | siramin1124/cfd_python_project | 5b70baf79fa21ddd27309b41d9e4dc2fde37efbc | 7f02c4bbd41bbdb0bd45a32f8338790bffd93969 |
refs/heads/master | <repo_name>Nyasha-Nziboi/CSS-Notes<file_sep>/README.md
# CSS-Notes
Some useful CSS I use on my projects
<file_sep>/gridUi/main.css
body,
html {
height: 100vh;
}
/* Mobile First */
body {
margin: 0;
display: grid;
grid-template-columns: 100%;
  grid-template-rows: repeat(5, auto);
grid-template-areas: "section1" "section2" "section3" "main" "footer";
}
aside {
grid-area: sidebar;
background-color: #007ff0;
}
header {
grid-area: header;
background-color: #91c8ff;
}
section:nth-of-type(1) {
grid-area: section1;
background-color: #b3d8fd;
}
section:nth-of-type(2) {
grid-area: section2;
background-color: #5386af;
}
section:nth-of-type(3) {
grid-area: section3;
background-color: #6d9fd2;
}
main {
grid-area: main;
background-color: #7da9d5;
}
footer {
grid-area: footer;
background-color: #588ec3;
}
/* Desktop */
@media only screen and (min-width: 768px) {
body {
margin: 0;
display: grid;
grid-template-columns: auto 27% 27% 27%;
grid-auto-rows: 8% 30% auto 10%;
grid-template-areas: "sidebar header header header" "sidebar section1 section2 section3" "sidebar main main main"
"sidebar footer footer footer";
}
}
<file_sep>/cssVar/css/main.scss
body {
height: 100vh;
margin: 0;
&::after {
content: ' ';
position: absolute;
top: 0;
left: 0;
background: red;
height: var(--y);
width: var(--x);
transition: all 1s;
}
}
| 5a15ee02d56356af3ee4a13897291ab2bd47e1e4 | [
"Markdown",
"SCSS",
"CSS"
] | 3 | Markdown | Nyasha-Nziboi/CSS-Notes | 077994cba1215be2df027950bb689007b819b729 | 085e899e6cf10c42ab37bdd69c570fe76ec115d5 |
refs/heads/master | <file_sep>// const assert = require("assert");
const { should, expect, assert } = require("chai");
const { add, mul } = require("../src/math");
// if (add(2, 3) === 5) {
// console.log("add(2, 3) === 5");
// } else {
// console.log("add(2, 3) !== 5");
// }
// assert.equal(add(2, 3), 6);
should();
add(2, 3).should.equal(5);
expect(add(2, 3)).to.equal(6);
| 8fb8a07bba2233b6728a9d85870592db8bf72a3c | [
"JavaScript"
] | 1 | JavaScript | weiyanan201/test | 49585b97c224938a6af2029709fd042d82e25692 | 82619d11b12385a033744a7d3626e937aa2eb124 |
refs/heads/master | <file_sep>'use strict';
var assert = require('assert');
describe('test basic once', function() {
var path = require('path');
var once = require(path.resolve(process.cwd(), 'once', 'index'));
it('basic test', function() {
var called = 0;
var init = once(function() {
return ++called;
});
init();
init();
init();
    assert.equal(called, 1, 'basic test failed');
});
it('will return the value from the original call for later calls', function() {
var t = 10;
var init = once(function() {
return ++t;
});
var ret = init();
    assert.equal(init(), ret, 'always-returns-first-call-value test failed');
    assert.equal(init(), ret, 'always-returns-first-call-value test failed');
});
it('gets called with context', function() {
var ctx;
var init = once(function() {
ctx = this;
});
init.call('TH');
init.call(91);
    assert.equal(ctx, 'TH', 'called-with-context test failed');
});
it('gets called with arguments', function() {
var args;
var init = once(function() {
args = [].slice.call(arguments);
});
init('hello');
init('world');
    assert.equal(args[0], 'hello', 'unchanged-arguments test failed');
});
it('used mulitple times', function() {
var t = 0,
m = 99;
var init1 = once(function() {
return t++;
});
var init2 = once(function() {
return m++;
});
init1();
init1();
init2();
init2();
    assert.equal(init1(), 0, 'multiple-use test failed');
    assert.equal(init2(), 99, 'multiple-use test failed');
});
});
<file_sep>'use strict';
/**
*
 * Try to implement the following functionality:
*
* var arr1 = [3, 6, 9];
* var arr2 = [1, 6, 8];
*
* var diff = difference(arr1, arr2);
*
* console.log(diff); // [3, 9]
*
**/
var difference = function(arr1, arr2) {
  //implement here
};
module.exports = difference;
<file_sep>'use strict';
var assert = require('assert');
describe('test basic difference', function() {
var path = require('path');
var difference = require(path.resolve(process.cwd(), 'difference', 'index'));
it('basic test', function() {
var arr1 = ['nan', 'feng', 'hao'];
var arr2 = ['aa', 'xiao', 'hao'];
var res = difference(arr1, arr2);
    assert.deepEqual(res, ['nan', 'feng'], 'basic test failed');
});
it('should match NaN', function() {
var arr1 = [1, NaN, 3];
var arr2 = [NaN, 5, NaN];
var res = difference(arr1, arr2);
    assert.deepEqual(res, [1, 3], 'NaN match test failed');
});
it('should match NaN2', function() {
var arr1 = [1, NaN, 3];
var arr2 = [5, 'k'];
var res = difference(arr1, arr2);
    assert.equal(res.toString(), '1,NaN,3', 'NaN match test 2 failed');
});
});
<file_sep>'use strict';
/**
*
 * Try to implement the following functionality:
*
* var say = function(name, words, callback){
* setTimeout(function(){
* console.log('Hi ' + name + '! ' + words);
* callback({
* status: 'FINISHED'
* });
* });
* };
*
* var thunkSay = thunkify(say);
*
* thunkSay('ZhangSan', 'You are freak.')(function(data){
* console.log(data); // { status: 'FINISHED' }
* });
*
**/
var thunkify = function(func) {
return function() {
var _this = this;
var args = Array.prototype.slice.call(arguments);
return function(cb) {
try {
func.apply(_this, args.concat([cb]));
} catch (e) {
cb(e);
}
};
};
};
module.exports = thunkify;
<file_sep>'use strict';
/**
*
 * Try to implement the following functionality:
*
* var mw = new Middleware();
*
* mw.use(function(next) {
* var self = this;
* setTimeout(function() {
* self.hook1 = true;
* next();
* }, 10);
* });
*
* mw.use(function(next) {
* var self = this;
* setTimeout(function() {
* self.hook2 = true;
* next();
* }, 10);
* });
*
* var startTime = new Date();
*
* mw.start(function() {
* console.log(this.hook1); // true
* console.log(this.hook2); // true
* console.log(new Date() - startTime); // around 20
* });
*
**/
var Middleware = function() {
this.pool = [];
};
Middleware.prototype.use = function(cb) {
this.pool.push(cb.bind(this));
};
Middleware.prototype.start = function(cb) {
var _this = this;
var pullOut = function() {
if (_this.pool.length === 0) {
return cb.call(_this);
}
_this.pool.shift()(pullOut);
};
pullOut();
};
module.exports = Middleware;
<file_sep>'use strict';
/**
*
 * Try to implement the following functionality:
*
* var mw = new Middleware();
*
* mw.use(function(next) {
* var self = this;
* setTimeout(function() {
* self.hook1 = true;
* next();
* }, 10);
* });
*
* mw.use(function(next) {
* var self = this;
* setTimeout(function() {
* self.hook2 = true;
* next();
* }, 10);
* });
*
* var startTime = new Date();
*
* mw.start(function() {
* console.log(this.hook1); // true
* console.log(this.hook2); // true
* console.log(new Date() - startTime); // around 20
* });
*
**/
var Middleware = function() {
  //implement here
};
module.exports = Middleware;
<file_sep>'use strict';
/**
*
 * This module must implement the following functionality:
*
* var currying = require('currying');
*
* var add = function(a, b) {
* return a + b;
* };
*
* var curried = curry(add);
* console.log(curried(1)(2)); // 3
*
**/
var currying = function(func) {
var len = func.length;
var getCurry = function(params) {
return function() {
var next = params.concat(Array.prototype.slice.call(arguments));
if (len - next.length <= 0) {
return func.apply(this, next);
}
return getCurry(next);
};
};
return getCurry([]);
};
module.exports = currying;
| 0d58ef78ffe34aa6972fc1224c6ef53bc9bd69b7 | [
"JavaScript"
] | 7 | JavaScript | VtanSen/fe-interview-1 | 24a629b3911bebf40a8a8d0d6aed1132d996ce42 | aad56696922e9efde71f604c751e5db0639e5e32 |
refs/heads/main | <file_sep># Hello world
time is limited
| ec5be0c167fc51f6f096a21b9a9d3303fb21da78 | [
"Markdown"
] | 1 | Markdown | HEFEIFEI-CLAODE/learning-folder | 7041011df076d3c05f5bafbc6b4723c210642a5d | 344fec0ad9d7a41d9bd7d30eadbf42a9ee330378 |
refs/heads/master | <file_sep>require "rails_helper"
RSpec.describe NrelService do
context "#nearest_alt_fuel_station" do
it "returns electric and propane fuel stations within a 6mi radius" do
VCR.use_cassette("nrel_service#nearest_alt_fuel_station") do
service = NrelService.new
response = service.nearest_alt_fuel_station(location: "60073", radius: "6.0", fuel_type: "ELEC,LPG")
expect(response[:total_results]).to eq(7)
expect(response[:fuel_stations][0][:fuel_type_code]).to eq("LPG")
expect(response[:fuel_stations][0][:id]).to eq(21207)
expect(response[:fuel_stations][0][:city]).to eq("Lake Villa")
end
end
end
end
<file_sep>---
http_interactions:
- request:
method: get
uri: http://developer.nrel.gov/api/alt-fuel-stations/v1/nearest.json?api_key=<KEY>&fuel_type=ELEC,LPG&location=60073&radius=6.0
body:
encoding: US-ASCII
string: ''
headers:
User-Agent:
- Faraday v0.9.2
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
response:
status:
code: 200
message: OK
headers:
Access-Control-Allow-Origin:
- "*"
Age:
- '0'
Cache-Control:
- max-age=0, private, must-revalidate
Content-Type:
- application/json; charset=utf-8
Date:
- Mon, 18 Apr 2016 15:44:12 GMT
Last-Modified:
- Mon, 18 Apr 2016 15:44:12 GMT
Server:
- openresty
Vary:
- Accept-Encoding
- Accept-Encoding
- Origin
Via:
- http/1.1 api-umbrella (ApacheTrafficServer [cMsSf ])
X-Cache:
- MISS
X-Ratelimit-Limit:
- '1000'
X-Ratelimit-Remaining:
- '995'
X-Request-Id:
- cf7f1643-7950-4f5a-9251-86a498c0bca0
X-Runtime:
- '0.011027'
Transfer-Encoding:
- chunked
Connection:
- keep-alive
body:
encoding: ASCII-8BIT
string: '{"latitude":42.3612999,"longitude":-88.0899304,"precision":{"name":"postal_code","types":["postal_code"],"value":5},"station_locator_url":"http://www.afdc.energy.gov/afdc/locator/stations/","total_results":7,"station_counts":{"total":10,"fuels":{"E85":{"total":0},"ELEC":{"total":8,"stations":{"total":5}},"HY":{"total":0},"LNG":{"total":0},"BD":{"total":0},"CNG":{"total":0},"LPG":{"total":2}}},"offset":0,"fuel_stations":[{"access_days_time":"8am-5pm
M-F, 8am-12pm Sat","cards_accepted":"Cash D M V","date_last_confirmed":"2015-11-03","expected_date":null,"fuel_type_code":"LPG","id":21207,"groups_with_access_code":"Public
- Call ahead","open_date":null,"owner_type_code":"P","status_code":"E","station_name":"Hicksgas","station_phone":"847-356-8225","updated_at":"2016-01-13T18:00:20Z","geocode_status":"200-8","latitude":42.416261,"longitude":-88.078478,"city":"Lake
Villa","intersection_directions":"Next to tracks","plus4":null,"state":"IL","street_address":"200
E Grand Ave","zip":"60046","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":null,"ev_dc_fast_num":null,"ev_level1_evse_num":null,"ev_level2_evse_num":null,"ev_network":null,"ev_network_web":null,"ev_other_evse":null,"hy_status_link":null,"lpg_primary":false,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"distance":3.83854},{"access_days_time":"6:30am-10pm
M-Sat, 8am-8pm Sun","cards_accepted":"A Checks Cash D M V","date_last_confirmed":"2015-11-03","expected_date":null,"fuel_type_code":"LPG","id":50497,"groups_with_access_code":"Public","open_date":null,"owner_type_code":"P","status_code":"E","station_name":"Menards","station_phone":"847-973-3050","updated_at":"2016-04-08T12:39:15Z","geocode_status":"GPS","latitude":42.364053,"longitude":-88.171275,"city":"Fox
Lake","intersection_directions":null,"plus4":null,"state":"IL","street_address":"1400
S US Highway 12","zip":"60020","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":null,"ev_dc_fast_num":null,"ev_level1_evse_num":null,"ev_level2_evse_num":null,"ev_network":null,"ev_network_web":null,"ev_other_evse":null,"hy_status_link":null,"lpg_primary":true,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"distance":4.16818},{"access_days_time":"Service
center use only","cards_accepted":null,"date_last_confirmed":"2015-11-03","expected_date":null,"fuel_type_code":"ELEC","id":70471,"groups_with_access_code":"Private","open_date":"2011-08-29","owner_type_code":"P","status_code":"E","station_name":"Zeigler
Nissan","station_phone":"847-264-4572","updated_at":"2016-01-13T18:00:32Z","geocode_status":"200-8","latitude":42.386942,"longitude":-88.0003053,"city":"Lake
Villa","intersection_directions":null,"plus4":null,"state":"IL","street_address":"18850
W Grand Ave","zip":"60046","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":["J1772"],"ev_dc_fast_num":null,"ev_level1_evse_num":null,"ev_level2_evse_num":1,"ev_network":null,"ev_network_web":null,"ev_other_evse":null,"hy_status_link":null,"lpg_primary":null,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"distance":4.91649},{"access_days_time":"Delaership
business hours","cards_accepted":null,"date_last_confirmed":"2015-11-03","expected_date":null,"fuel_type_code":"ELEC","id":70446,"groups_with_access_code":"Public
- Call ahead","open_date":"2011-08-29","owner_type_code":"P","status_code":"E","station_name":"Zeigler
Nissan","station_phone":"847-264-4572","updated_at":"2016-01-13T18:00:32Z","geocode_status":"200-8","latitude":42.386942,"longitude":-88.0003053,"city":"Lake
Villa","intersection_directions":null,"plus4":null,"state":"IL","street_address":"18850
W Grand Ave","zip":"60046","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":["J1772"],"ev_dc_fast_num":null,"ev_level1_evse_num":null,"ev_level2_evse_num":1,"ev_network":null,"ev_network_web":null,"ev_other_evse":null,"hy_status_link":null,"lpg_primary":null,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"distance":4.91649},{"access_days_time":"24
hours daily","cards_accepted":null,"date_last_confirmed":"2016-04-18","expected_date":null,"fuel_type_code":"ELEC","id":65779,"groups_with_access_code":"Public","open_date":null,"owner_type_code":null,"status_code":"E","station_name":"<NAME>","station_phone":"888-758-4389","updated_at":"2016-04-18T08:17:28Z","geocode_status":"GPS","latitude":42.3869571,"longitude":-88.0000714,"city":"Lake
Villa","intersection_directions":"QUICK CHARGER 1; -","plus4":null,"state":"IL","street_address":"18850
W Grand Ave","zip":"60046","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":["CHADEMO"],"ev_dc_fast_num":1,"ev_level1_evse_num":null,"ev_level2_evse_num":null,"ev_network":"ChargePoint
Network","ev_network_web":"http://www.mychargepoint.net/","ev_other_evse":null,"hy_status_link":null,"lpg_primary":null,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"ev_network_ids":{"posts":["1:99109"]},"distance":4.92803},{"access_days_time":"ComEd
fleet use only","cards_accepted":null,"date_last_confirmed":"2016-03-07","expected_date":null,"fuel_type_code":"ELEC","id":73384,"groups_with_access_code":"Private","open_date":"2016-01-05","owner_type_code":"T","status_code":"E","station_name":"ComEd","station_phone":"630-576-6223","updated_at":"2016-03-07T22:18:45Z","geocode_status":"200-8","latitude":42.3025,"longitude":-88.007327,"city":"Libertyville","intersection_directions":null,"plus4":null,"state":"IL","street_address":"1500
Franklin Blvd","zip":"60048","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":["J1772"],"ev_dc_fast_num":null,"ev_level1_evse_num":null,"ev_level2_evse_num":4,"ev_network":null,"ev_network_web":null,"ev_other_evse":null,"hy_status_link":null,"lpg_primary":null,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"distance":5.86234},{"access_days_time":"8am-5pm
daily; mainly for employee use","cards_accepted":null,"date_last_confirmed":"2016-03-07","expected_date":null,"fuel_type_code":"ELEC","id":73395,"groups_with_access_code":"Public
- Call ahead","open_date":"2013-01-04","owner_type_code":"T","status_code":"E","station_name":"ComEd","station_phone":"630-576-6223","updated_at":"2016-03-07T22:18:45Z","geocode_status":"200-8","latitude":42.3025,"longitude":-88.007327,"city":"Libertyville","intersection_directions":null,"plus4":null,"state":"IL","street_address":"1500
Franklin Blvd","zip":"60048","bd_blends":null,"e85_blender_pump":null,"ev_connector_types":["J1772"],"ev_dc_fast_num":null,"ev_level1_evse_num":null,"ev_level2_evse_num":1,"ev_network":null,"ev_network_web":null,"ev_other_evse":null,"hy_status_link":null,"lpg_primary":null,"ng_fill_type_code":null,"ng_psi":null,"ng_vehicle_class":null,"distance":5.86234}]}'
http_version:
recorded_at: Mon, 18 Apr 2016 15:44:11 GMT
recorded_with: VCR 3.0.1
<file_sep>class Station
attr_reader :name,
:address,
:city,
:state,
:hours,
:type,
:distance
def initialize(data)
@name = data[:station_name]
@address = data[:street_address]
@state = data[:state]
@city = data[:city]
@hours = data[:access_days_time]
@type = data[:fuel_type_code]
@distance = data[:distance]
end
def self.find_by(zip_code)
service = NrelService.new
data = service.nearest_alt_fuel_station(
location: zip_code,
radius: "6.0",
fuel_type: "ELEC,LPG",
limit: "10"
)
data[:fuel_stations].map do |station|
new(station)
end
end
end
<file_sep>class NrelService
def initialize
@_connection = Faraday.new("http://developer.nrel.gov")
connection.params["api_key"] = ENV["NREL_API_KEY"]
end
def nearest_alt_fuel_station(params)
parse(connection.get("/api/alt-fuel-stations/v1/nearest.json", params))
end
private
def connection
@_connection
end
def parse(response)
(JSON.parse(response.body)).deep_symbolize_keys
end
end
<file_sep><ul class="stations">
<% @stations.each do |station| %>
<li>
<h1><%= station.name %></h1>
<h1><%= station.address %>, <%= station.city %>, <%= station.state %></h1>
<h1><%= station.hours %></h1>
<h1><%= station.type %></h1>
<h1><%= station.distance %> mi</h1>
</li>
<% end %>
</ul>
<file_sep>require "rails_helper"
RSpec.feature "User can search for ten closests stations" do
scenario "they see the ten closests stations info" do
VCR.use_cassette("closest_stations_by_zip") do
visit root_path
fill_in "q", with: "80203"
click_button "Locate"
# expect(current_path).to eq("/search?zip=80203")
list = []
list = find(".stations").all("li")
expect(list.size).to eq(10)
expect(list[0]).to have_content("ELEC")
expect(list[0]).to have_content("800 Acoma St, Denver, CO")
expect(list[0]).to have_content("0.3117 mi")
expect(list[0]).to have_content("UDR")
expect(list[0]).to have_content("24 hours daily")
expect(list[9]).to have_content("ELEC")
expect(list[9]).to have_content("1.13847 mi")
expect(list[9]).to have_content("PUBLIC STATIONS")
expect(list[9]).to have_content("1286-1296 Stout St, Denver, CO")
expect(list[9]).to have_content("24 hours daily")
list.each do |station|
expect(station).to_not have_content("BD")
expect(station).to_not have_content("CNG")
expect(station).to_not have_content("E85")
expect(station).to_not have_content("HY")
expect(station).to_not have_content("LNG")
end
end
end
end
| 8f72a35e71fd32363a5a7320947aa2e4381436f7 | [
"HTML+ERB",
"Ruby",
"YAML"
] | 6 | HTML+ERB | julsfelic/module_3_diagnostic | bbe8dad8ae12b6f8604ff7a4f84dcbaf51d79bc0 | 02694a74bf4ae3742955b5c71bd557ccdc5004d9 |
refs/heads/master | <repo_name>kin3tics/photo-color-viz<file_sep>/README.md
# photo-color-viz
| 33c4ad4bb6ad06dce308a7db86293d8e7211a80b | [
"Markdown"
] | 1 | Markdown | kin3tics/photo-color-viz | df83034a7e61169b933bb14098cb85f44c5fdda8 | 7cac43d82f7538040cf197f884b1cad8cc570d1e |
refs/heads/master | <repo_name>Aakashdeveloper/dec_visual_mern<file_sep>/React/firstapp/src/index.js
import React, {Component} from 'react';
import ReactDOM from 'react-dom';
import Header from './components/Header';
import NewsList from './components/News_list';
import JSON from './db.json'
class App extends React.Component {
constructor(props){
super(props);
this.state = {
news: JSON,
filtered: JSON
}
}
filterNews(keyword){
let output = this.state.news.filter((data) => {
return data.title.toLowerCase().indexOf(keyword.toLowerCase()) > -1
})
this.setState({filtered: output })
}
render(){
return (
<div>
<Header searchKeyword={(keyword) => this.filterNews(keyword)}/>
<NewsList sendNews={this.state.filtered}></NewsList>
</div>
)
}
}
ReactDOM.render(<App/>, document.getElementById('root'))
/*
functional (stateless)
lightweight, with less functionality
function add(a,b){
return a+b
}
let add = (a,b) => {
return a+b
}
Class based(stateful)
a bit heavier than functional components, but with more functionality
state & props
JSX
*/
<file_sep>/NodeApi/app.js
const express = require('express');
const app = express();
const MongoClient = require('mongodb').MongoClient;
const port = 7900;
let db;
let mongourl = "mongodb://localhost:27017";
app.get('/', (req, res) => {
res.send("Welcome to Node Api")
})
app.get('/movies', (req,res) => {
db.collection('books').find().toArray((err, result) => {
if(err) throw err;
res.send(result)
})
})
app.get('/company', (req,res) => {
db.collection('first').find().toArray((err, result) => {
if(err) throw err;
res.send(result)
})
})
app.post('/insert', (req,res)=>{
let insertData = {name:'DEll', type:'Tech'}
db.collection('first').insert(insertData, (err, result) => {
if(err) throw err;
        console.log('data inserted')
        res.send('data inserted')
    })
})
MongoClient.connect(mongourl, (err, client) => {
if(err) throw err;
db = client.db('acadgild_aug')
app.listen(port,()=>{
console.log('running on port number '+ port)
})
})
<file_sep>/React/secondapp/src/components/post_detail.js
import React, {Component} from 'react';
class PostsDetail extends Component{
constructor(props){
super(props)
}
render(){
console.log(this.props)
return (
<div class="panel panel-danger">
<div class="panel-heading">PostsDetail</div>
<div class="panel-body">
PostsDetail Panel Content for {this.props.match.params.name}
</div>
</div>
)
}
}
export default PostsDetail;<file_sep>/Node/moviesApp/app.js
let http = require('http');
let server = http.createServer(function(req, res){
res.write("<h1>"+"Server is running"+"</h1>")
res.end();
})
server.listen(6700);<file_sep>/React/redux2/src/containers/Galleries.js
import React, {Component} from 'react';
import { connect } from 'react-redux';
import { selectedGallery, clearselectedGallery} from '../actions'
import { bindActionCreators} from 'redux';
import Slider from 'react-slick';
import Counter from './LikesCounter';
const settings = {
dots: true,
infinite: true,
speed: 500,
slidesToShow: 1,
slidesToScroll: 1
};
class Galleries extends Component {
componentDidMount(){
this.props.selectedGallery(this.props.match.params.id)
}
componentWillUnmount(){
this.props.clearselectedGallery();
}
renderSlider = ({selected}) => {
if (selected) {
const gallery = selected[0]
return (
<div>
<h3>The best of {gallery.artist}</h3>
<Slider {...settings}>
{gallery.images.map((item,index) => {
return(
<div key={index} className="slide-item">
<div>
<div
className="image"
style={{background:`url(/images/galleries/${item.img})`}}
>
</div>
<div className="description">
<span>{item.desc}</span>
</div>
</div>
</div>
)
})}
</Slider>
<Counter
articleId={gallery.id}
type="HANDLE_LIKES_GALLERY"
section="galleries"
likes={gallery.likes[0]}
dislikes={gallery.likes[1]} />
</div>
)
}
}
render(){
return(
<div className="slide-item-wrap">
<div className="main-slider">
{this.renderSlider(this.props.gallery)}
</div>
</div>
)
}
}
function mapStateToProps(state){
return{
gallery:state.gallery
}
}
function mapDispatchToProps(disptach){
return bindActionCreators({selectedGallery, clearselectedGallery}, disptach)
}
export default connect(mapStateToProps, mapDispatchToProps)(Galleries);
var data = [
{"name":"aa", "classn":"angular", "rollno":1},
{"name":"bb", "classn":"angular", "rollno":2}
]
var checkname = (name) => {
    // looks up an entry in the data array above by its name property
    return data.find((item) => item.name === name);
};
var jsonData = [{"name":"name1","value":"value1"},{"name":"name2","value":"value2"}];
for(var i=0;i<jsonData.length;i++){
if(jsonData[i]['name'] == 'name2'){
console.log('The value is: ' + jsonData[i]['value']);
break;
}
}
<file_sep>/Node/moviesApp/src/routes/artistRoute.js
import express from 'express';
let artistRouter = express.Router();
function router(nav){
artistRouter.route('/')
.get((req,res) => {
res.render('artist', {
title:'Artist Page',
nav})
})
artistRouter.route('/details')
.get((req,res) => {
res.render('artistDetail', {
title:'Artist Detail Page',
nav})
})
return artistRouter
}
module.exports = router;<file_sep>/DashBoard_NM-master/model/database.js
var mongodb = require('mongodb').MongoClient;
var express = require('express');
var app = express();
var MongoClient = require('mongodb').MongoClient;
var url = "mongodb://192.168.127.12:27017/";
var collection_name = 'users';
var dataBase = function(myobj){
MongoClient.connect(url, function(err, db) {
console.log(" i am here at addData")
if (err) throw err;
var dbo = db.db("testData1");
dbo.collection("first").insertOne(myobj, function(err, res) {
if (err) throw err;
console.log("1 document inserted");
db.close();
});
});
}
//Update Data
dataBase.prototype.update = function(query, myobj){
MongoClient.connect(url, function(err, db) {
if (err) throw err;
var dbo = db.db("testData1");
dbo.collection("second").update(query, myobj, function(err, res) {
if (err) throw err;
console.log("1 document updated");
db.close();
});
});
}
dataBase.prototype.delete = function(myquery){
MongoClient.connect(url, function(err,db){
if(err) throw err;
var dbo = db.db("testData1");
dbo.collection('second').deleteOne(myquery,function(err,res){
if(err) throw err;
console.log("data deleted");
db.close()
})
})
}
dataBase.prototype.findAll = function(colName, callback){
MongoClient.connect(url, function(err, db){
if(err) throw err;
var dbo = db.db("testData1")
dbo.collection(colName).find({}).toArray(
function(err,results){
callback(results)
})
})
}
module.exports = dataBase;<file_sep>/Node/moviesApp/server.js
import express from 'express';
import chalk from 'chalk';
import morgan from 'morgan'
let app = express();
app.use(express.static(__dirname+'/public'));
app.set('views', './src/views');
app.set('view engine', 'ejs')
app.use(morgan('tiny'))
var navArray = [{link:'/', title:'Home'},
{link:'/movies', title:'Movies'},
{link:'/artist', title:'Artits'}];
let moviesRouter = require('./src/routes/moviesRoute')(navArray)
let artistRouter = require('./src/routes/artistRoute')(navArray)
app.get('/', (req,res) => {
res.render('index', {title:'Home Page',nav:navArray})
})
app.use('/movies', moviesRouter);
app.use('/artist', artistRouter);
app.listen(3400,(err) => {
if(err){
console.log("error")
}
console.log(`server is running on port ${chalk.blue('3400')}`)
})
<file_sep>/DashBoard_NM-master/app.js
const express = require('express');
const app = express();
const bodyParser= require('body-parser');
const MongoClient = require('mongodb').MongoClient;
const ObjectID = require('mongodb').ObjectID;
const PORT = 2000;
var db;
var mongourl = 'mongodb://localhost:27017/';
// var mongoClass = require('./model/database.js');
var collection_name = 'first';
//tell Express to make this public folder accessible to the public by using a built-in middleware called express.static
app.use(express.static(__dirname + '/public'));
// The urlencoded method within body-parser tells body-parser to extract data from the <form> element and add them to the body property in the request object
app.use(bodyParser.urlencoded({extended: true}));
app.use(bodyParser.json());
app.set('view engine', 'ejs')
app.set('views', './views');
app.post('/addData', (req, res) => {
console.log("Body received to add data is "+JSON.stringify(req.body));
db.collection(collection_name).save(req.body, (err, result) => {
if (err) return console.log(err)
console.log("result is "+JSON.stringify(result));
console.log('saved to database')
res.redirect('/')
})
})
app.get('/', (req, res) => {
db.collection(collection_name).find().toArray((err, result) => {
if (err) return console.log(err)
// renders index.ejs
console.log("Data received from db to show on index page is "+JSON.stringify(result));
res.render('index.ejs', {data: result})
// res.send(result)
})
})
app.post('/find_by_name', (req, res) => {
console.log("Body received to add data is "+JSON.stringify(req.body));
var name = req.body.id;
db.collection(collection_name).find({first_name: name}).toArray((err, result) => {
if (err) return console.log(err)
// renders index.ejs
console.log("Data received from db to show in Modal is "+JSON.stringify(result));
res.send(result)
})
})
app.get('/new', (req, res) => {
db.collection(collection_name).find().toArray((err, result) => {
if (err) return console.log(err)
res.render('admin.ejs', {data: result})
})
})
app.get('/bkp', (req, res) => {
db.collection(collection_name).find().toArray((err, result) => {
if (err) return console.log(err)
// renders index.ejs
res.render('bkp.ejs', {data: result})
})
})
app.put('/update_user', (req, res) => {
console.log('i was called, and i am updating the db');
console.log('Data received to update'+JSON.stringify(req.body));
console.log('_id is '+req.body._id);
db.collection(collection_name)
.findOneAndUpdate({"_id":req.body._id}, {
$set: {
last_name: req.body.last_name,
first_name: req.body.first_name,
email: req.body.email
}
}, {
sort: {_id: -1},
upsert: true
}, (err, result) => {
if (err) return res.send(err)
res.send(result)
})
})
app.delete('/delete_user', (req, res) => {
console.log('i was called, and i am deleting entry from the db');
console.log('Data i got is '+JSON.stringify(req.body));
db.collection(collection_name).findOneAndDelete({first_name: req.body.id},
(err, result) => {
if (err) return res.send(500, err)
res.send({message: 'success'})
})
})
// app.get('/all', (req, res) => {
//The toArray method takes in a callback function that allows us to do stuff with quotes we retrieved from database
// db.collection(collection_name).find({}).toArray(function(err, results) {
// console.log(results)
// send HTML file populated with quotes here
// })
//})
// app.listen(PORT, function() {
// console.log('+++++++++++++++++++++++ Server listening on PORT '+PORT);
// })
MongoClient.connect(mongourl, (err, client) => {
if (err) return console.log(err)
db = client.db('acadgild_aug') // whatever your database name is
app.listen(PORT, () => {
console.log('+++++++++++++++++++++++ Server listening on PORT '+PORT);
})
})<file_sep>/Node/moviesApp/src/routes/moviesRoute.js
import express from 'express';
import MongoClient from 'mongodb';
import 'babel-polyfill'
let moviesRouter = express.Router();
var movies = [
{
"_id": "5ab12612f36d2879268f284a",
"name": "<NAME>",
"language": "ENGLISH",
"rate": 4.5,
"type": "Action Adventure Fantasy",
"imageUrl": "https://image.ibb.co/f0hhZc/bp.jpg"
}
]
function router(nav){
moviesRouter.route('/')
.get((req,res) => {
const url = 'mongodb://localhost:27017';
const dbName= 'acadgild_aug';
(async function mongo(){
let client;
try{
client = await MongoClient.connect(url);
const db = client.db(dbName)
const col = await db.collection('books');
const movies = await col.find().toArray();
//res.send(movies)
res.render('movies', {
title:'Movies Page',
nav:nav,
data:movies})
}
catch(err){
console.log(err)
}
client.close();
}())
})
moviesRouter.route('/:id')
.get((req,res) => {
const {id} = req.params;
const url = 'mongodb://localhost:27017';
const dbName= 'acadgild_aug';
(async function mongo(){
let client;
try{
client = await MongoClient.connect(url);
const db = client.db(dbName)
const col = await db.collection('books');
const moviesDetails = await col.findOne({_id:id});
//res.send(movies)
res.render('detail', {
title:'Detail Page',
nav:nav,
data:moviesDetails})
}
catch(err){
console.log(err)
}
client.close();
}())
})
return moviesRouter
}
module.exports = router;<file_sep>/DashBoard_NM-master/public/delete.js
// var del = document.getElementById('delete')
// del.addEventListener('click', function () {
// fetch('delete_user', {
// method: 'delete',
// headers: {
// 'Content-Type': 'application/json'
// },
// body: JSON.stringify({
// 'name': null
// })
// })
// .then(res => {
// if (res.ok) return res.json()
// }).
// then(data => {
// console.log(data)
// window.location.reload()
// })
// })
$('.delete').click(function() {
id = this.id;
console.log('id is '+id);
$.ajax({
type: 'POST',
url: '/delete_user',
method: 'delete',
data: {"id":id},
success: function(data){
console.log('data is '+JSON.stringify(data));
window.location.reload()
},
error: function(){
alert('No data');
}
});
}); | 90d1164378f9acaf73e329ab8dfd616b007bb66c | [
"JavaScript"
] | 11 | JavaScript | Aakashdeveloper/dec_visual_mern | fba00ed969e11b95cfa0257b1cbcd9066ff5e235 | 6d9ce4e7319825f78ca5dfc56f5ef4620bf34ae2 |
refs/heads/master | <repo_name>GmAtlas/Vanguard<file_sep>/lua/vanguard/logs.lua
file.CreateDir("vanlogs/")
function Vanguard:Log( str )
if ( CLIENT ) then return end
local logFile = "vanlogs/" .. os.date( "%d-%m-%Y" ) .. ".txt"
local files = file.Find( "vanlogs/" .. os.date( "%d-%m-%Y" ) .. "*.txt", "DATA" )
table.sort( files )
if ( #files > 0 ) then logFile = "vanlogs/" .. files[math.max(#files-1,1)] end
local src = file.Read( logFile, "DATA" ) or ""
if ( #src > 200 * 1024 ) then
logFile = "vanlogs/" .. os.date( "%d-%m-%Y" ) .. " (" .. #files + 1 .. ").txt"
end
file.Append( logFile, "[" .. os.date() .. "] " .. str .. "\n" )
print("[" .. os.date() .. "] " .. str)
end<file_sep>/lua/autorun/vanguard.lua
if SERVER then
AddCSLuaFile( )
include("vanguard/init.lua")
else
include("vanguard/cl_init.lua")
end
<file_sep>/lua/vanguard/cl_init.lua
include("shared.lua")
hook.Add("Initialize","arbit_cl_init",function(ply)
	print("Vanguard client initialized")
end) <file_sep>/lua/vanguard/init.lua
include("shared.lua")
AddCSLuaFile("cl_init.lua")
AddCSLuaFile("shared.lua")
util.AddNetworkString("Vanguard_Message")<file_sep>/lua/vanguard/plugins/sh_slay.lua
PLUGIN.Title = "Slap"
PLUGIN.Description = "Slap a player."
PLUGIN.ChatCommand = "slap"
PLUGIN.Usage = "[players] [damage]"
PLUGIN.Privileges = { "Slap" }
function PLUGIN:Call( ply, args )
if ( ply:HasPrivilege("Slap") ) then
local players = Vanguard:FindPlayer( args[1], ply, true )
local dmg = math.abs( tonumber( args[ #args ] ) or 10 )
for _, pl in pairs( players ) do
pl:SetHealth( pl:Health() - dmg )
pl:ViewPunch( Angle( -10, 0, 0 ) )
if ( pl:Health() < 1 ) then pl:Kill() end
end
if ( #players > 0 ) then
			Vanguard:Notify( ply:GetRankColor(), ply:Nick(), color_white, " has slapped ", Color(255,0,0), Vanguard:List(players), color_white, " with " .. dmg .. " damage." )
else
Vanguard:Notify( ply, Color(0,0,255),"No players found." )
end
else
Vanguard:Notify( ply, Color(255,0,0),"You're not allowed to use this command!" )
end
end<file_sep>/lua/vanguard/shared.lua
Vanguard = {}
Vanguard.Plugins = {}
Vanguard.StockRanks = {}
Vanguard.Ranks = {}
Vanguard.Players = {}
Vanguard.Privileges = {}
Vanguard.Ranks.Guest = {}
Vanguard.Ranks.Guest.Title = "Guest"
Vanguard.Ranks.Guest.UserGroup = "Guest"
Vanguard.Ranks.Guest.Color = Color(0,255,0)
Vanguard.Ranks.Guest.Immunity = 0
Vanguard.Ranks.Guest.IsAdmin = false
Vanguard.Ranks.Guest.CanTarget = {"Guest"}
Vanguard.Ranks.Guest.Privileges = {}
Vanguard.Ranks.Moderator = {}
Vanguard.Ranks.Moderator.Title = "Moderator"
Vanguard.Ranks.Moderator.UserGroup = "Moderator"
Vanguard.Ranks.Moderator.Color = Color(0,0,255)
Vanguard.Ranks.Moderator.Immunity = 1
Vanguard.Ranks.Moderator.IsAdmin = false
Vanguard.Ranks.Moderator.CanTarget = {"Guest","Moderator"}
Vanguard.Ranks.Moderator.Privileges = {"Slap"}
_VG = table.Copy(_G)
if not Vanguard_HOOKCALL then Vanguard_HOOKCALL = hook.Call end
local PMeta = FindMetaTable("Player")
if not file.Exists( "vanguard", "DATA" ) then
file.CreateDir( "vanguard" )
end
if SERVER then
include("logs.lua")
function Vanguard:Notify( ... )
	local arg = { ... }
	local ply
	if ( type( arg[1] ) == "Player" or arg[1] == NULL ) then ply = arg[1] end
if ( ply != NULL ) then
net.Start( "Vanguard_Message" )
net.WriteUInt( #arg, 16 )
for _, v in ipairs( arg ) do
if ( type( v ) == "string" ) then
net.WriteBit(false)
net.WriteString( v )
elseif ( type ( v ) == "table" ) then
net.WriteBit(true)
net.WriteUInt( v.r, 8 )
net.WriteUInt( v.g, 8 )
net.WriteUInt( v.b, 8 )
net.WriteUInt( v.a, 8 )
end
end
if ply ~= nil then
net.Send(ply)
else
net.Broadcast()
end
end
local str = ""
end
else
function Vanguard:Notify( ... )
local arg = { ... }
args = {}
for _, v in ipairs( arg ) do
if ( type( v ) == "string" or type( v ) == "table" ) then table.insert( args, v ) end
end
chat.AddText( unpack( args ) )
end
net.Receive( "Vanguard_Message", function( length )
local argc = net.ReadUInt(16)
local args = {}
for i = 1, argc do
if net.ReadBit() == 1 then
table.insert( args, Color( net.ReadUInt(8), net.ReadUInt(8), net.ReadUInt(8), net.ReadUInt(8) ) )
else
table.insert( args, net.ReadString() )
end
end
chat.AddText( unpack( args ) )
end )
end
function Vanguard:LoadPlugins()
local files, _ = file.Find('vanguard/plugins/*.lua', 'LUA')
	for _, name in pairs(files) do
		if name ~= '__category.lua' then
if SERVER then AddCSLuaFile('vanguard/plugins/' .. name) end
PLUGIN = {}
PLUGIN.__index = PLUGIN
PLUGIN.ID = string.gsub(string.lower(name), '.lua', '')
PLUGIN.Title = ""
PLUGIN.Description = ""
PLUGIN.Usage = ""
PLUGIN.Privileges = { "" }
include('vanguard/plugins/' .. name)
local item = PLUGIN
self.Plugins[PLUGIN.ID] = PLUGIN
PLUGIN = nil
end
end
end
hook.Call = function( name, gm, ... )
return Vanguard.HookCall( name, gm, ... )
end
local errCount, s = {}, {}
local function sort( a, b ) return a[2] < b[2] end
function Vanguard.HookCall( name, gm, ... )
s = {}
for _, plug in pairs( Vanguard.Plugins ) do
if type( plug[ name ] ) == "function" then
table.insert( s, { plug, 1 } )
end
end
table.sort( s, sort )
for _, d in ipairs( s ) do
local plug = d[1]
local data = { pcall( plug[ name ], plug, ... ) }
if data[1] == true and data[2] != nil then
table.remove( data, 1 )
return unpack( data )
elseif data[1] == false then
if not errCount[ name ] then errCount[ name ] = {} end
if not errCount[ name ][ plug.ID ] then errCount[ name ][ plug.ID ] = 0 end
end
end
return Vanguard_HOOKCALL( name, gm, ... )
end
function Vanguard:IsNameMatch( ply, str )
if ( str == "*" ) then
return true
elseif ( string.match( str, "STEAM_[0-5]:[0-9]:[0-9]+" ) ) then
return ply:SteamID() == str
elseif ( string.Left( str, 1 ) == "\"" and string.Right( str, 1 ) == "\"" ) then
return ( ply:Nick() == string.sub( str, 2, #str - 1 ) )
else
return ( string.lower( ply:Nick() ) == string.lower( str ) or string.find( string.lower( ply:Nick() ), string.lower( str ), nil, true ) )
end
end
function Vanguard:FindPlayer( name, def, nonum, noimmunity )
local matches = {}
if ( !name or #name == 0 ) then
matches[1] = def
else
if ( type( name ) != "table" ) then name = { name } end
local name2 = table.Copy( name )
if ( nonum ) then
if ( #name2 > 1 and tonumber( name2[ #name2 ] ) ) then table.remove( name2, #name2 ) end
end
for _, ply in pairs( player.GetAll() ) do
for _, pm in pairs( name2 ) do
if ( Vanguard:IsNameMatch( ply, pm ) and !table.HasValue( matches, ply ) ) then table.insert( matches, ply ) end
end
end
end
return matches
end
function PMeta:CanTarget(ply)
return table.HasValue( Vanguard.Ranks[self:GetRank()].CanTarget, ply:GetRank() )
end
function PMeta:CanTargetOrEqual( ply )
return ( table.HasValue(Vanguard.Ranks[self:GetRank()].CanTarget,ply:GetRank()) or ( self:GetRank() == ply:GetRank() ) )
end
function PMeta:HasPrivilege( pri )
if ( Vanguard.Ranks[ self:GetRank() ] ) then
return table.HasValue( Vanguard.Ranks[ self:GetRank() ].Privileges, pri )
else
return false
end
end
function PMeta:SetValue(id,val)
if not Vanguard.PlayerInfo[self:SteamID()] then Vanguard.PlayerInfo[self:SteamID()] = {} end
Vanguard.PlayerInfo[self:SteamID()][id] = val
end
function PMeta:GetValue(id,val)
	local info = Vanguard.PlayerInfo[self:SteamID()]
	if not info then return val end
	return info[id] or val
function PMeta:IsAdmin()
return Vanguard.Ranks[self:GetRank()].IsAdmin
end
function PMeta:SetRank( rank )
if( not Vanguard.Ranks[rank]) then return end
self:SetValue( "Rank", rank )
self:SetNWString( "UserGroup", rank )
self:SetUserGroup(rank)
end
function PMeta:GetRank()
local rank = self:GetNetworkedString( "UserGroup" )
if rank == "" then return "Guest" end
if !rank then return "Guest" end
if !Vanguard.Ranks[rank] then return "Guest" end
return rank
end
function PMeta:GetRankColor()
	return Vanguard.Ranks[self:GetRank()].Color or color_white
end
function Vanguard:List( tbl, notall )
local lst = ""
local lword = "and"
if ( notall ) then lword = "or" end
if ( #tbl == 1 ) then
lst = tbl[1]:Nick()
elseif ( #tbl == #player.GetAll() ) then
lst = "everyone"
else
for i = 1, #tbl do
if ( i == #tbl ) then lst = lst .. " " .. lword .. " " .. tbl[i]:Nick() elseif ( i == 1 ) then lst = tbl[i]:Nick() else lst = lst .. ", " .. tbl[i]:Nick() end
end
end
return lst
end
function Vanguard:LoadPlayers()
if ( file.Exists( "vanguard/playerinfo.txt", "DATA" ) ) then
debug.sethook()
self.PlayerInfo = util.JSONToTable( file.Read( "vanguard/playerinfo.txt", "DATA" ) )
for k,v in pairs(player.GetAll()) do
if(self.PlayerInfo[v:SteamID()]) then
v:SetRank(self.PlayerInfo [v:SteamID()] ["Rank"] )
end
end
else
self.PlayerInfo = {}
end
end
function Vanguard:SavePlayerInfo()
file.Write( "vanguard/playerinfo.txt", util.TableToJSON( self.PlayerInfo ) )
end
function Vanguard:SaveRanks()
file.Write( "vanguard/userranks.txt", util.TableToJSON(Vanguard.Ranks) )
end
function Vanguard:LoadRanks()
if ( file.Exists( "vanguard/userranks.txt", "DATA" ) ) then
Vanguard.Ranks = util.JSONToTable( file.Read( "vanguard/userranks.txt", "DATA" ) )
else
Vanguard:SaveRanks()
end
end
| cde7ca9bddf5b2e31ed0856eb4c1391db58d8979 | [
"Lua"
] | 6 | Lua | GmAtlas/Vanguard | 2b7fc6462d893b96d968ee1c2c6e4336949974dc | 16b3c1be59942ab3bf1b044d1f8c2c2521fe7520 |
refs/heads/master | <file_sep># flask_tsp_solver
A simple set of solvers for the TSP.
Main purpose of this project was to learn JavaScript and Python Webservices
Also I submitted this to IT-Talent's code competition
# CLI (Linux)
For quick tests, you can use the CLI.
Therefore install all dependencies via
``make``
Then activate the Virtual Environment via:
``./venv/bin/activate``
Then execute the CLI script via
``python CLI.py arg0, ..., arg8``
where ``arg0, ..., arg8`` are places to want to visit and ``arg0`` is the start
# Quickstart Webserver (Linux)
To run the server in a virtual environment, just type
``make && make run``
in the root directory.
This will setup a virtualenv with all dependencies and start the server
You need to have Python, pip and virtualenv installed!
On Ubuntu and Debian may also can execute
```make bootstrap```
to Python, pip and virtualenv.
It will ask for your sudoer's password
You can then access the webpage via http://127.0.0.1:5000
# Installation
To install the package in your python environment use
```python setup.py install```
<file_sep>import unittest
from tsp_application import GoogleDistanceMatrixFactory
class GoogleTest(unittest.TestCase):
origins = ["Perth, Australia", "Sydney, Australia",
"Melbourne, Australia", "Adelaide, Australia",
"Brisbane, Australia", "Darwin, Australia",
"Hobart, Australia", "Canberra, Australia"]
def setUp(self):
self.matrix = GoogleDistanceMatrixFactory("<KEY>")
def testDistanceMatrix(self):
rc = self.matrix.create(GoogleTest.origins)
assert rc
if __name__ == '__main__':
unittest.main()
<file_sep>import json
import os
import pickle
import unittest
from decimal import Decimal as d
from random import choice
from random import uniform
import networkx as nx
from datetime import datetime
from tsp_application import GoogleDistanceMatrixFactory
from tsp_application import algorithms
def mock_shortest_path_metric(n=10, low=1000, hi=5000):
"""
Creates a fake matrix with random distances from the interval [low, hi]
:param n: Number of places
:param low: Lowest distance
:param hi: Highest distance
:return:
"""
# A complete Graph with diameter 1
G = nx.complete_graph(n)
# Each edge gets a random weight
for u, v in G.edges_iter():
G[u][v]["weight"] = int(uniform(low, hi))
# Compute shortest path for each pair (u,v)
P = nx.floyd_warshall(G)
M = {u: dict() for u in G.nodes_iter()}
# Transform weights into shortest path metric
for u, v in G.edges_iter():
M[u][v] = P[u][v]
return M
def createTestset(self, name, places):
"""
Creates a testset with the given places and stores it under the given name
:param name:
:param places:
:return:
"""
factory = GoogleDistanceMatrixFactory("<KEY>")
matrix = factory.create(places)
assert matrix
pickle.dump(matrix, open("testsets/%s.json" % (name), "wb"))
class FullStackTest(unittest.TestCase):
def testAll(self):
path = './testsets'
for fn in os.listdir(path):
testset = pickle.load(open(os.path.join(path, fn), "rb"))
assert testset
start = choice(testset.keys())
assert start
results = {
"__Meta__": {
"Matix": fn,
"Start": start
}
}
for algo in algorithms:
l, p = algorithms[algo].solve(start, testset)
results[algo] = float(d(l))
date = datetime.now()
json.dump(results, open("results/json/%s-%s.json" % (fn,date), "wb"), indent=4, sort_keys=True)
pickle.dump(results, open("results/pickle/%s-%s.pickle" % (fn,date), "wb"))
print results
def OnMockData(self):
testset = mock_shortest_path_metric()
assert testset
start = choice(testset.keys())
assert start
results = {
"__Meta__": {
"Matix": "Random",
}
}
for algo in algorithms:
l, p = algorithms[algo].solve(start, testset)
results[algo] = float(d(l))
date = datetime.now()
json.dump(results, open("results/jsonRandom-%s.json" % (date), "wb"), indent=4, sort_keys=True)
pickle.dump(results, open("results/pickle/Random-%s.pickle" % (date), "wb"))
print results
if __name__ == '__main__':
unittest.main()
<file_sep>import logging
import random
from decimal import Decimal as d
from multiprocessing import Pool, cpu_count
from sys import maxint
import networkx as nx
from abstract_tsp_solver import AbstractTSPSolver
# Default Values for parameters
RHO_DEFAULT = d(0.4)
ITERATIONS_DEFAULT = 1000
BETA_DEFAULT = d(15)
SMALL_Q_DEFAULT = 0.3
#
# Metaparameters,
# We need to fix some parameters to optimize the rest
# Hence these parameters are set in the constructor
Q_DEFAULT = d(10)
ALPHA_DEFAULT = d(10)
COLONY_SIZE_DEFAULT = d(10)
# Fixed initial values
TAU_INITIAL = d(1)
class AntColony(AbstractTSPSolver):
"""
This class implements an ACO algorithm to approximate the TSP
"""
def options(self):
return {
"beta": {
"default" : int(BETA_DEFAULT),
"min": 1,
"max": 1000,
"step": 0.5,
"name": "Pheromone Weight"
},
"rho": {
"default" : float(RHO_DEFAULT),
"min": 0,
"max": 1,
"step": 0.1,
"name": "Evaporation Factor"
},
"q": {
"default" : float(SMALL_Q_DEFAULT),
"min": 0,
"max": 1,
"step": 0.1,
"name": "Exploration Factor"
},
"iterations": {
"default": int(ITERATIONS_DEFAULT),
"min": 100,
"max": 1000000,
"step": 10,
"name": "Iterations"
}
}
def info(self):
return "https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms"
def update_G(self, G, P, rho):
"""
Updates pherome
:param G:
:param P:
:return:
"""
assert isinstance(G, nx.Graph)
#
# First each egdes gets some pheromone depending on the number of ants which crossed it
#
# Iterate over all paths
for l, path in P:
# Iterate over all edges in the path
for e in path:
# Get data
edge_ctx = G.get_edge_data(*e)[EdgeContext.KEY]
# Add pheromone according to path length
edge_ctx.addPheromone(self.Q/d(l), refresh=False)
#
# Then a constant factor evapoates
#
# Iterate over all edges in the graph
for u, v, edge_ctx in G.edges_iter(data=EdgeContext.KEY):
# Ignore self-edges
if(u != v):
assert edge_ctx
# Let some pheromone evaporate
edge_ctx.evaporate(rho)
def __init__(self,
alpha=ALPHA_DEFAULT,
size=COLONY_SIZE_DEFAULT,
Q = Q_DEFAULT
):
super(AntColony, self).__init__()
self.size = size
self.alpha = alpha
self.Q = Q
def solve(self, s, instance,
beta=BETA_DEFAULT,
rho=RHO_DEFAULT,
q=0.3,
iterations=ITERATIONS_DEFAULT):
# Transform instance to proper nxgraph G
G = nx.Graph()
for node, neigbors in instance.items():
G.add_edges_from([
(node, n, {
"weight": d(w),
EdgeContext.KEY: EdgeContext(w, self.alpha, beta)
}
) for n, w in neigbors.items() if n != node
])
# Path with minimal length
p_min = None
# Length of minimal path
l_min = maxint
# Open process pool
pool = Pool(cpu_count() - 1)
# I
for i in range(int(iterations)):
# Log progress
logging.info("%s started iteration %d" % (self.name, i))
print "%s started iteration %d" % (self.name, i)
# Stores path of each ant
P = []
# Stores async results
futures = []
# Start 10 async computations
for i in range(10):
futures.append(pool.apply_async(Ant, args=(G, s, q, )))
# Wait for all compuations to stop
for future in futures:
# Wait 'till done
future.wait()
# Get result
P.append(future.get())
# Update Pheromone
self.update_G(G, P, rho)
# Update shortest path
for l, p in P:
if l < l_min:
l_min = l
p_min = p
# Close process pool
pool.close()
# Wait for pool to properly shut down
pool.join()
# Return best path
return l_min, [u for u, v in p_min]
def Ant(G,s,q=0.3):
"""
An Ant is a visitor that searches for the TSP path in the given Graph.
:param G: A Graph for which we want to solve the TSP. Each edge must have an instance of EdgeContext as an attribute
:param s: The starting point for the TSP
:param q: The exploration/expoitation parameter.
:return:
"""
def pick_random(edges):
"""
Picks a random edge according to its weight.
:param edges: A list of edges together with their edge context
:return:
"""
#
#
# Here's how it works:
#
# First we compute the probability of each element.
# This is its weight divided by the sum of all weights
#
# Now we imagine all these weights summed up in the [0,1] interval.
#
# It looks something like this:
#
# ++++++++++++++++++++++++++++++++++++++++
# | 0.5 | 0.25 | 0.25 |
# ++++++++++++++++++++++++++++++++++++++++
# 0 1
#
# If we now randomly pick a number in the interval,
# the probility that the number is in a certain interval is
# exactly the probability of the corresponidng element
#
# ++++++++++++++++++++++++++++++++++++++++
# | 0.5 | 0.25 | 0.25 |
# ++++++++++++++++++++++++++++++++++++++++
# 0 ^ 1
# |
# With p=0.5 we pick a point from the first element's interval
# Sum of all weights
sum_weights = sum([d(ctx.attractiveness) for u, v, ctx in edges])
# Variable that stores the cumulative Probability
cum_prob = 0
# Generate a random number between 0 and 1
r = random.random()
# Iterate over all edges, order does not matter
for u, v, ctx in edges:
# Increase cumulative
cum_prob += ctx.attractiveness / sum_weights
# Check if value is in interval
if cum_prob >= r:
# Return edge
return u, v, ctx
# Continue otherwise
# We may reach this point due to rounding errors
# Just return a random element
return random.choice(edges)
assert isinstance(G, nx.Graph)
assert 0.0 <= q <= 1.0
# The path, which the ant took
path = list()
# Length of the path
length = 0
# Nodes, which need to be visited
open = [u for u in G.nodes_iter() if u != s]
# Current Node
current = s
while len(open)>0:
# Grab all admissible edges whose targets have not been visited
candidates = [(u, v, w) for (u, v, w) in G.edges(current, data=EdgeContext.KEY) if v in open]
if random.random() < q:
# Pick uniformly at random -> Exploration
u, v, w = random.choice(candidates)
else:
# Pick random edge according to weight -> Exploitation
u, v , w = pick_random(candidates)
# Append new edge to path
path.append((u, v))
# Update path length
length += w.distance
# Update current node
current = v
# Remove next node from open list
open.remove(v)
# Add distance back to start
length += G[current][s]["weight"]
path.append((current, s))
return length, path
class EdgeContext(object):
KEY = "ACO_EDGE_CTX"
def __init__(self, distance, alpha, beta):
#Set distance
self._distance = distance
# Set alpha
self._alpha = alpha
# Set beat
self._beta = d(beta)
# Set initial TAU
self._tau = TAU_INITIAL
# Compute eta
self._eta = d(1) / self.distance
self._phi = self._eta
# Compute initial attractiveness
self._updateAttractiveness()
def _updateAttractiveness(self):
"""
Updates the attractiveness according to ACO's formula:
phi = eta**alpha * tau**beta
:return:
"""
def assertBiggerZero(n):
"""
Checks if the given value is bigger than 0.
If it is, it returns the value, otherwise the next bigger value
:param n: A number or Decimal
:return: n, is n > d(0)
n.next_plus(), otherwise
"""
# Assert that n is Decimal
n = d(n)
# Check if zero
if n == d(0):
# Return the next bigger number,
# Actual step size is defined by Decimal.Context
return n.next_plus()
else:
return n
#
# The products below are possibly very(!) small and hence rounded to 0 -> bad
# if that's the case, we assign the smallest possible value -> not so bad
#
t_eta = assertBiggerZero(self._eta ** self._alpha)
t_tau = assertBiggerZero(self._tau ** self._beta)
self._phi = assertBiggerZero(t_eta * t_tau)
@property
def pheromone(self):
"""
:return: The current level of pheromone on this edge
"""
return self._tau
@property
def distance(self):
"""
:return: The length of this edge
"""
return self._distance
@property
def attractiveness(self):
"""
:return: The edge's attractivness
"""
return self._phi
def addPheromone(self, delta_tau, refresh = True):
"""
Adds the given amount of pheromone
:param delta_tau: a positive number
:param refresh: Refresh the pheromone value
"""
self._tau = self._tau + delta_tau
if refresh:
self._updateAttractiveness()
def evaporate(self, rho):
"""
Reduces pheromone by factor rho
:param rho: a real number between 0 and 1 (inclusive)
"""
assert 0 <= rho <= 1
self._tau = d(rho) * self._tau
self._updateAttractiveness()
<file_sep>from abstract_tsp_solver import AbstractTSPSolver
class FurthestNeighbor(AbstractTSPSolver):
"""
"""
def options(self):
return {}
def info(self):
return "https://en.wikipedia.org/wiki/Travelling_salesman_problem#Heuristic_and_approximation_algorithms"
def __init__(self):
"""
Creates new Christofides instance
"""
super(FurthestNeighbor, self).__init__()
pass
def solve(self, s, instance, **options):
TSP = [s]
cur = s
l = 0
open = [u for u in instance if u != s]
while len(open)>0:
cur, w = min([(v, w) for v, w in instance[cur].items() if v in open], key = lambda (v, w): (0 - w))
l += w
TSP.append(cur)
open.remove(cur)
l += instance[cur][s]
return l, TSP<file_sep>all: install
bootstrap:
echo "Installing Pip"
sudo apt-get install python-pip
echo "Installing virtualenv"
sudo pip install virtualenv
venv:
virtualenv venv
install: venv
echo "Installing packages from requirements.txt"
venv/bin/pip install -r requirements.txt
run:
venv/bin/python run.py
clean:
rm *.pyc<file_sep>
# Setup LOGGING
import logging
logging.basicConfig(level=logging.INFO)
from flask import Flask
#Create Flask application
app = Flask(__name__)
from google_tsp import GoogleDistanceMatrixFactory
from tsp_solver import algorithms
matrix = GoogleDistanceMatrixFactory("<KEY>")
import tsp_application.views
import tsp_application.commands<file_sep>from flask import render_template
from tsp_application import app, algorithms
@app.route('/')
def main():
return render_template('map.html',
algos=algorithms.values())
<file_sep>import logging
from flask import request, jsonify
from tsp_application import app, matrix, algorithms
@app.route("/ajax/solve")
def ajax_solve():
"""
Serves the AJAX Request to solve the TSP.
The Request consists of:
{
"waypoints": <- A list of waypoints either strings or LatLng-dicts
"origin": <- The starting point
"algo": <- The algorithm to use
"options": <- key/value pairs as arguments for the algorithm (optional)
"travel_mode": <- Mode of travel
}
The result looks as follows
{
"status": <- Status code,
"length": <- length of the path (iff path is found)
"start": <- The starting point (iff path is found)
"algo": <- The used algorithm (iff path is found)
"path": <- A list in which order to visit the endpoints (iff path is found)
"msg": <- An optional message
}
:return:
"""
# The possible errors and their human-readable messages
ERRORS = {
403: "Google Directions could not find a path",
404: "Google Directions did not send response",
405: "You did not specify a start",
406: "You need to specify at least two waypoints",
407: "You did not specify a valid algorithm",
408: "Internal Algorithm Error",
}
def to_tuple(waypoint):
"""
Converts LatLng dicts to tuples.
:param waypoint: A waypoint as string, tuple or LatLng dict
:return: waypoint, if waypoint is string or tuple,
a tuple of the lat and lng values, if dict
"""
if isinstance(waypoint, dict):
return (waypoint["lat"], waypoint["lng"])
else:
return waypoint
def to_dict(waypoint):
"""
Converts to tuples to LatLng dicts.
:param waypoint: A waypoint as string or tuple
:return: waypoint, if waypoint is string or tuple,
a LatNg dict, if tuple
"""
if isinstance(waypoint, tuple):
return {"lat": waypoint[0], "lng": waypoint[1]}
else:
return waypoint
# Get the arguments
json = request.args
# Check that a start point is supplied
start = json.get("origin")
if not start:
return jsonify(status=406, msg=ERRORS[405])
# Convert to tuple if necessary
# This is needed to store waypoints as keys in a dict
start = to_tuple(start)
waypoints = json.getlist("waypoints[]")
if not waypoints:
return jsonify(status=406, msg=ERRORS[406])
# We need to have at least two points for a path
if len(waypoints) < 2:
return jsonify(status=406, msg=ERRORS[406])
# Convert to tuple if necessary
# This is needed to store waypoints as keys in a dict
waypoints = map(to_tuple, waypoints)
# Get the algorithm
algorithm = algorithms[json["algo"]]
if not algorithm:
return jsonify(status=407, msg=ERRORS[407])
# Get the options
options = {}
for option in algorithm.options():
options[option] = float(json.get("options[%s]" % option))
try:
distances = matrix.create(waypoints)
except BaseException as e:
logging.warning("Exception %s while creating matrix for %s" % (e, waypoints))
return jsonify(status=404, msg=ERRORS[404])
else:
if distances:
try:
# Call the algorithm
l, path = algorithm.solve(start, distances, **options)
except BaseException as e:
logging.warning("Exception %s while executing %s with %s" % (e, algorithm.name, options))
return jsonify(status=408, msg=ERRORS[408])
else:
# Pack result
result = {
"status": 200,
"length": l,
"start": start,
"algo": json["algo"],
"path": map(to_dict, path),
"msg": "SUCCESS"
}
# Return the result
return jsonify(result)
else:
return jsonify(status=403, msg=ERRORS[403])
<file_sep>from abc import abstractmethod
class AbstractTSPSolver(object):
"""
Abstract base class for an algorithm, that solves the TSP problem
"""
def __init__(self):
pass
@abstractmethod
def solve(self, s, distance_graph, **kwargs):
"""
Solves the TSP Problem for the given instance
An instance is two-dimenational dict, which represents distance matrix
s must be any key in that dict
:param s: The starting point
:param distance_graph: A datastructure that contains nodes and their distances to other nodes
:param kwargs: additional key-word arguments for implemtations
:return: A tuple (d,p) where
- d is the path's length
- p is the path represented as a list nodes
"""
pass
@abstractmethod
def info(self):
"""
:return: A human readable description of the algorithm
"""
return ""
@abstractmethod
def options(self):
"""
Returns a dict with additional information about this algorithms options.
Each option must look like this
{
"key": {
"default" : # <- Default value
"min": , # <- Min value
"max": , # <- Max. value
"step": , # <- Step size
"name": # <- Human readable name
},
}
Note: these dicts are used to create the forms in the GUI
:return: A dict of options
"""
return {}
@property
def name(self):
"""
:return: This algorithm's name
"""
return self.__class__.__name__
<file_sep>
from setuptools import setup
setup(
name = "tsp",
version = "0.0.1",
author = "<NAME>",
author_email = "<EMAIL>",
description = ("A Tsp Solver for IT Talents"),
license = "BSD",
packages=['tsp_application', 'tests'],
package_data={'tests': ['testsets/*.pickle'],
'tsp_application': ['templates/*.html']},
install_requires=[
'Flask',
'networkx',
'googlemaps',
],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)<file_sep>from itertools import product
from math import fmod
import networkx as nx
from abstract_tsp_solver import AbstractTSPSolver
class Christofides(AbstractTSPSolver):
"""
Solves metric instances of the TSP problem in O(n^3)
time with worst case approximation ratio of 1.5
"""
def options(self):
return {}
def info(self):
return "https://en.wikipedia.org/wiki/christofides_algorithm"
def __init__(self):
"""
Creates new Christofides instance
"""
super(Christofides, self).__init__()
pass
def solve(self, s, instance, **options):
#
# Christofides' Heuristic works as follows:
#
# Given a Graph G it computes the Minimum Spanning Tree T
#
# Then all of T's nodes with odd degree are matched.
# That means, that each odd node is connected with another, s.t. the sum of all these paths is minimal
# Since there must be an even number of odd nodes, each has a partner.
#
# Now all nodes have an even degree and an Euler Circle can be computed (cf. Handshake Lemma)
# An Euler Circle is a path that uses every edge exactly once
# (Note, that this is possible iff each degree is even).
#
# Lastly any node that appears twice is removed.
#
# The resulting path is at most 1.5 times the length of the optimum
#
# The algorithm run in O(n^3) where n is the input size
#
#Transform instance to nxgraph G
G = nx.Graph()
# Iterate over all nodes and their connections
for u, u_neighbors in instance.items():
# Add each connection to Graph
G.add_weighted_edges_from([(u, v, w) for v, w in u_neighbors.items() if u != v])
# Create Spanning Tree T
T = nx.minimum_spanning_tree(G)
# Find all nodes with odd degree.
V = [u for u in T.nodes() if fmod(T.degree(u), 2) == 1]
# Reduce G to the Nodes in V:
# That means all paths that contain nodes, which are not in V, are replaced by a edge with the paths' weight
G_V = nx.Graph()
for v, w in product(V, V):
if v != w:
weight = nx.dijkstra_path_length(G, v, w)
# We need the negative weight for the matching in the next step
G_V.add_edge(v, w, weight=-weight)
#
# Since the weights are negated, max_weight_matching will actually create a min-weight-matching
# However, we need to set maxcardinality= True,
# otherwise it will return the empty matching (which has a weight of zero and is therefore the maximum matching)
#
# M is a dict of nodes and their matching partners
M = nx.max_weight_matching(G_V, maxcardinality=True)
#
# TuM is the conjunction of T and M
# It has to be MultiGraph, otherwise we cannot assure its eulerian
#
# Example:
# Assume nodes u and v are of odd degree and the edge {u,v} exists in both T and M
#
#
TuM = nx.MultiGraph(selfloops=False)
# Add all edges from the matching
for u, v in M.items():
weight = G_V.get_edge_data(u, v)["weight"]
# Only add edges once
if u not in G_V.neighbors(v):
# Don't forget to revert weight back to positive value
TuM.add_edge(u, v, weight= -weight)
# Add all edges from the matching
for u in T.nodes_iter():
for v in T.neighbors_iter(u):
weight = T.get_edge_data(u, v)["weight"]
TuM.add_edge(u, v, weight=weight)
# Construct the Euler Circut on TuM
Euler = [u for u, v in nx.eulerian_circuit(TuM, source=s)]
# TSP holds our solution path
TSP = [s]
#length of the solution path
l = 0
# Current Node in the Hamilton Path
current = s
# Iterate over each edge in the path
for v in Euler:
# Compute shortcut for each node we've not yet visited
if v not in TSP:
# Always take the shortest path from the current position
# Dijkstra runs in O(n^2) and we execute it n times, so this says in bound
l += nx.dijkstra_path_length(TuM, current, v)
# Update Path and current location
current = v
TSP.append(v)
# Lastly, go back to start!
l += nx.dijkstra_path_length(TuM, current, s)
return l, TSP
<file_sep>#!flask/bin/python
from tsp_application import app
app.run()<file_sep>from tsp_application import GoogleDistanceMatrixFactory
from tsp_application import algorithms
import sys
factory = GoogleDistanceMatrixFactory("<KEY>")
print "Welcome to the TSP Solver"
print 'Your queries are', str(sys.argv[1:])
matrix = factory.create(sys.argv[1:])
if not matrix:
print "Google didn't find the places"
sys.exit(0)
start = matrix.keys()[0]
for algo in algorithms:
print algo, " started"
l, p = algorithms[algo].solve(start, matrix)
print algo, " computed length ", l
print algo, " computed path ", p
<file_sep>from google_distance_matrix import GoogleDistanceMatrixFactory
<file_sep>from abstract_tsp_solver import AbstractTSPSolver
from antcolony import AntColony
from christofides import Christofides
from furthest_neighbor import FurthestNeighbor
from nearest_neighbor import NearstNeighbor
algorithms = { algo.name: algo
for algo in
[
Christofides(),
AntColony(),
NearstNeighbor(),
FurthestNeighbor()
]
}<file_sep>from itertools import izip
import googlemaps
import googlemaps.distance_matrix as dm
class GoogleDistanceMatrixFactory(object):
"""
A factory class that uses Google Maps to create distance matrices
"""
def __init__(self, key):
"""
Creates a new instance
:param key: The API-Key for Google Maps
"""
self.key = key
self.client = googlemaps.Client(self.key)
def create(self, places, data=u"duration"):
"""
Creates a new distance matrix
:param places:
:param data:
:return:
"""
# Make sure, we have list of entries
if not isinstance(places, list):
return None
# Make sure, that we use a valid metric supported by Google Maps
if not unicode(data) in [u"duration", u"distance"]:
return None
# Response is HTTP from Google
response = dm.distance_matrix(self.client, places, places)
# Check if response was successful
if response[u"status"] == u"OK":
# Variable for return value
matrix = dict()
# Iterate over each place and its corresponding row in the response
for start, row in izip(places, response[u"rows"]):
# Wrap dict
# start = GoogleDistanceMatrixFactory.Place(start)
# Create entry for starting place
matrix[start] = dict()
# Iterate over all possible destinations and their indvidual responses
for dest, element in izip(places,row[u"elements"]):
# Check if a path was found
if element[u"status"] != u"OK":
return None
# Create entry for start and destination
matrix[start][dest] = element[unicode(data)][u"value"]
return matrix
else:
return None
| 6895e5a40ab2f2696cf17309c4d40828085cf171 | [
"Makefile",
"Markdown",
"Python"
] | 17 | Makefile | Ah0ih0i/flask_tsp_solver | b284bf537a1a88f15e0380fb154a295412ba273f | 51d22b75dfe29ab6056ce1c8328bf509beade700 |
refs/heads/main | <repo_name>Abdullayev999/DynamicIntArray<file_sep>/DynamicIntArray/DynamicIntArray.cpp
#include <iostream>
#include<iomanip>
/* Задание 1:
Класс IntArray - обертка для динамического массива (int)
Реализуйте методы для:
+ добавления элемента в конец массива
+ получения доступа к конкретному элементу по заданному индексу
+ получения количества элементов в массиве
+ проверки является ли массив пустым
+ все поля должны быть приватными
+ заполните массив и распечатайте его данные в main
*/
class IntArray
{
public:
IntArray() {
++m_count;
}
IntArray(const int number) {
++m_count;
SetElementEnd(number);
}
~IntArray() {
delete[] m_arr;
--m_count;
}
static int getCount() {
return m_count;
}
bool arrayState() {
if (m_index)
return true;
else
return false;
}
int getElement(const int number) {
return m_arr[number];
}
int getSize() {
return m_index;
}
void SetElementStar(int number)
{
if (m_index == m_size)capacity();
if (m_index) {
int* tmp = new int[m_size];
tmp[0] = number;
for (int i = 0, b = 1; i < m_index; b++, i++)
tmp[b] = m_arr[i];
m_index++;
delete[] m_arr;
m_arr = tmp;
}
else {
m_arr[m_index++] = number;
}
}
void SetElementEnd(const int number)
{
if (m_index == m_size)capacity();
m_arr[m_index++] = number;
}
private:
void capacity() {
m_size += m_capacity;
int* tmp = new int[m_size];
for (int i = 0; i < m_index; i++)
tmp[i] = m_arr[i];
delete[] m_arr;
m_arr = tmp;
}
size_t m_size = 5;
size_t m_index = 0;
int* m_arr = new int[m_size];
size_t m_capacity = 5;
static unsigned int m_count;
};
unsigned int IntArray::m_count = 0;
int main()
{
IntArray a;
if (a.arrayState())
{
std::cout << "\nArrar # " << a.getCount() << "\n__________________\n |\n";
for (int i = 0; i < a.getSize(); i++)
{
std::cout << " arr["
<< std::right << std::setw(2) << std::setfill(' ') << i << "] = "
<< std::right << std::setw(5) << a.getElement(i) << " |\n";
}
std::cout << "__________________|\n\n";
}
else
{
std::cout << "\nNo data!!\n\n";
}
a.SetElementEnd(10);
a.SetElementEnd(11);
a.SetElementEnd(12345);
a.SetElementEnd(13);
a.SetElementEnd(14);
a.SetElementEnd(10);
a.SetElementEnd(11);
a.SetElementEnd(12);
a.SetElementEnd(13);
if (a.arrayState()) {
std::cout << "\nArrar # " << a.getCount() << "\n__________________\n |\n";
for (int i = 0; i < a.getSize(); i++) {
std::cout << " arr["
<< std::right << std::setw(2) << std::setfill(' ') << i << "] = "
<< std::right << std::setw(5) << a.getElement(i) << " |\n";
}
std::cout << "__________________|\n\n";
}
else {
std::cout << "\nNo data!!\n\n";
}
a.SetElementEnd(10);
a.SetElementEnd(11);
a.SetElementEnd(12);
a.SetElementEnd(13);
a.SetElementEnd(14);
a.SetElementStar(9);
a.SetElementStar(8);
a.SetElementStar(7);
a.SetElementStar(6);
a.SetElementStar(5);
a.SetElementStar(4);
a.SetElementStar(3);
a.SetElementStar(2);
a.SetElementStar(1);
if (a.arrayState())
{
std::cout << "\nArrar # " << a.getCount() << "\n__________________\n |\n";
for (int i = 0; i < a.getSize(); i++)
{
std::cout << " arr["
<< std::right << std::setw(2) << std::setfill(' ') << i << "] = "
<< std::right << std::setw(5) << a.getElement(i) << " |\n";
}
std::cout << "__________________|\n\n";
}
else
{
std::cout << "\nNo data!!\n\n";
}
std::cout << "\nArray size = " << a.getSize() << "\n\n";
std::cout << "Array state : " << std::boolalpha << a.arrayState() << '\n';
IntArray b(5);
if (b.arrayState())
{
std::cout << "\nArrar # " << b.getCount() << "\n__________________\n |\n";
for (int i = 0; i < b.getSize(); i++)
{
std::cout << " arr["
<< std::right << std::setw(2) << std::setfill(' ') << i << "] = "
<< std::right << std::setw(5) << b.getElement(i) << " |\n";
}
std::cout << "__________________|\n\n";
}
else
{
std::cout << "\nNo data!!\n\n";
}
b.SetElementEnd(10);
b.SetElementEnd(11);
b.SetElementEnd(12);
b.SetElementEnd(13);
b.SetElementStar(999);
if (b.arrayState())
{
std::cout << "\nArrar # " << b.getCount() << "\n__________________\n |\n";
for (int i = 0; i < b.getSize(); i++)
{
std::cout << " arr["
<< std::right << std::setw(2) << std::setfill(' ') << i << "] = "
<< std::right << std::setw(5) << b.getElement(i) << " |\n";
}
std::cout << "__________________|\n\n";
}
else {
std::cout << "\nNo data!!\n\n";
}
std::cout << "\nArray size = " << b.getSize() << "\n\n";
std::cout << "Array state : " << std::boolalpha << b.arrayState() << '\n';
std::cout << "\nCount array = " << b.getCount();
}
| b5bf03a845745fe6b495b9bd7e85b398db5c6092 | [
"C++"
] | 1 | C++ | Abdullayev999/DynamicIntArray | 28080a07078e8a6ceb8e1a9e294a6bdca8342b97 | 5cc870a83ccb9c1d49bcc57132d432d4536b85d7 |
refs/heads/master | <repo_name>tanveerctg/latest_new_burger_app<file_sep>/src/components/BurgerTypes/BurgerTypes.js
import React, { Component } from "react";
import classes from "./BurgerTypes.module.scss";
import eggBurger from "./egg.png";
import beef from "./beef.png";
import jr from "./j.png";
import salad from "./salad.png";
import cheese from "./cheese.png";
import customized from "./customized.png";
import chicken from "./chicken.png";
import all from "./all.png";
import { connect } from "react-redux";
import getFilterBurger from "../../getFilterBurger/getFilterBurger";
import { Link } from "react-router-dom";
class BurgerTypes extends Component {
filterByType = e => {
this.props.dispatch({
type: "FILTER_BY_TYPES",
name: e.target.attributes.val.nodeValue
});
this.props.dispatch({
type: "BURGER_TYPE_NAME",
name: e.target.nextSibling.innerText
});
};
render() {
return (
<section className={classes.burgerTypes}>
<div className={classes.row}>
<div className={classes.burgerTypes__all_burgers}>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<img
src={all}
className={classes.burgerTypes__burgerImage}
val="all"
onClick={this.filterByType}
/>
<h2
className={classes.burgerTypes__burgerName}
style={{ textAlign: "center" }}
>
All Burgers
</h2>
</div>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<img
src={cheese}
className={classes.burgerTypes__burgerImage}
val="cheese"
onClick={this.filterByType}
/>
<h2 className={classes.burgerTypes__burgerName}>Cheese Burger</h2>
</div>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<img
src={chicken}
className={classes.burgerTypes__burgerImage}
val="chicken"
onClick={this.filterByType}
/>
<h2 className={classes.burgerTypes__burgerName}>
Chicken & More
</h2>
</div>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<img
src={salad}
className={classes.burgerTypes__burgerImage}
val="salad"
onClick={this.filterByType}
/>
<h2 className={classes.burgerTypes__burgerName}>
Salad and Veggies
</h2>
</div>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<img
src={jr}
className={classes.burgerTypes__burgerImage}
val="jr meals"
onClick={this.filterByType}
/>
<h2
className={classes.burgerTypes__burgerName}
style={{ textAlign: "center" }}
>
Jr ™ meals
</h2>
</div>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<img
src={beef}
className={classes.burgerTypes__burgerImage}
val="beef"
onClick={this.filterByType}
/>
<h2
className={classes.burgerTypes__burgerName}
style={{ textAlign: "center" }}
>
Beef
</h2>
</div>
<div
className={classes.burgerTypes__burger}
style={{ cursor: "pointer" }}
>
<Link
to="burgerBuilder"
style={{ textDecoration: "none", color: "white" }}
>
<img
src={customized}
className={classes.burgerTypes__burgerImage}
val="beef"
onClick={this.filterByType}
/>
<h2
className={classes.burgerTypes__burgerName}
style={{ textAlign: "center" }}
>
Make Your Own Burger
</h2>
</Link>
</div>
</div>
</div>
</section>
);
}
}
const mapStateToProps = state => {
return {
getFilterBurger: getFilterBurger(
state.adminReducer.allBurgers,
state.filterReducer
)
};
};
export default connect(mapStateToProps)(BurgerTypes);
<file_sep>/src/components/ContactData/ContactData.js
import React, { Component } from "react";
import classes from "./ContactData.module.scss";
import Aux from "../../HOC/helper";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import bkash from "./bkash.png";
class ContactData extends Component {
state = {
data: {
name: null,
email: null,
mobile: null,
address: null,
tableNo: null,
selectedOption: null
}
};
nameChangeHandler = e => {
const regEx = /^[a-z\s?\-?]+$/gi;
if (e.target.value !== null) {
if (regEx.test(e.target.value)) {
let getValue = e.target.value;
let upgradeName = { ...this.state.data };
upgradeName["name"] = getValue;
this.setState({ data: upgradeName });
e.target.className = classes.valid;
} else {
let upgradeName = { ...this.state.data };
upgradeName["name"] = null;
this.setState({ data: upgradeName });
e.target.className = classes.invalid;
}
}
e.preventDefault();
};
emailChangeHandler = e => {
const regEx = /^([\w\.?\-?]+)@([a-z]+)(\.[a-z]{2,8})(\.[a-z]{2,8})?$/gi;
if (e.target.value !== null) {
if (regEx.test(e.target.value)) {
let getValue = e.target.value;
let upgradeEmail = { ...this.state.data };
upgradeEmail["email"] = getValue;
this.setState({ data: upgradeEmail });
e.target.className = classes.valid;
} else {
let upgradeEmail = { ...this.state.data };
upgradeEmail["email"] = null;
console.log(upgradeEmail);
this.setState({ data: upgradeEmail });
e.target.className = classes.invalid;
}
}
e.preventDefault();
};
mobileChangeHandler = e => {
const regEx = /^(\d){11}$/gi;
if (e.target.value !== null) {
if (regEx.test(e.target.value)) {
let getValue = e.target.value;
let upgradeMobile = { ...this.state.data };
upgradeMobile["mobile"] = getValue;
this.setState({ data: upgradeMobile });
e.target.className = classes.valid;
} else {
let upgradeName = { ...this.state.data };
upgradeName["mobile"] = null;
this.setState({ data: upgradeName });
e.target.className = classes.invalid;
}
}
e.preventDefault();
};
addressChangeHandler = e => {
const regEx = /.+/gi;
if (e.target.value !== null) {
if (regEx.test(e.target.value)) {
let getValue = e.target.value;
let upgradeAddress = { ...this.state.data };
upgradeAddress["address"] = getValue;
this.setState({ data: upgradeAddress });
e.target.className = classes.valid;
} else {
let upgradeName = { ...this.state.data };
upgradeName["address"] = null;
this.setState({ data: upgradeName });
e.target.className = classes.invalid;
e.target.className = classes.invalid;
}
}
e.preventDefault();
};
handleSubmit = e => {
if (this.props.selectedOption === "home") {
let name = this.state.data.name;
let email = this.state.data.email;
let mobile = this.state.data.mobile;
let address = this.state.data.address;
let payment = this.state.data.selectedOption;
let data = { name, email, mobile, address, payment };
if (name && email && mobile && address && payment) {
console.log("ok");
this.props.contactInfo(data);
setTimeout(() => {
this.props.buy();
}, 100);
} else {
this.props.error(true);
}
} else {
let name = this.state.data.name;
let table = this.state.data.tableNo;
let payment = this.state.data.selectedOption;
let data = { name, table, payment };
console.log(name, table, payment);
if (name && table && payment) {
console.log("ok");
this.props.contactInfo(data);
setTimeout(() => {
this.props.buy();
}, 100);
} else {
console.log("not ok");
this.props.error(true);
}
}
e.preventDefault();
};
handleChange = e => {
const regEx = /\d+/gi;
if (e.target.value !== null) {
if (regEx.test(e.target.value)) {
let getValue = e.target.value;
let upgradeTableNo = { ...this.state.data };
upgradeTableNo["tableNo"] = getValue;
this.setState({ data: upgradeTableNo });
e.target.className = classes.valid;
} else {
let getValue = e.target.value;
let upgradeTableNo = { ...this.state.data };
upgradeTableNo["tableNo"] = getValue;
this.setState({ data: upgradeTableNo });
e.target.className = classes.invalid;
}
}
e.preventDefault();
};
handleOptionChange = e => {
let upgradeTable = { ...this.state.data };
if (!!upgradeTable["selectedOption"]) {
upgradeTable["selectedOption"] = false;
this.setState({ data: upgradeTable });
} else {
upgradeTable["selectedOption"] = true;
this.setState({ data: upgradeTable });
}
// this.setState({ selectedOption: e.target.value });
};
render() {
console.log(!!this.state.data.selectedOption);
return (
<form onSubmit={this.handleSubmit} className={classes.form}>
{this.props.selectedOption === "home" ? (
<Aux>
<div style={{ position: "relative", width: "100%" }}>
<input
placeholder="Name"
type="text"
onChange={this.nameChangeHandler}
/>
<label>Name</label>
<FontAwesomeIcon
icon={["fas", "check-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="#6DB65B"
/>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="red"
/>
</div>
<div style={{ position: "relative", width: "100%" }}>
<input
placeholder="Email"
type="text"
onChange={this.emailChangeHandler}
/>
<label>Email</label>
<FontAwesomeIcon
icon={["fas", "check-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="#6DB65B"
/>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="red"
/>
</div>
<div style={{ position: "relative", width: "100%" }}>
<input
placeholder="Mobile"
type="text"
onChange={this.mobileChangeHandler}
/>
<label>Mobile</label>
<FontAwesomeIcon
icon={["fas", "check-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="#6DB65B"
/>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="red"
/>
</div>
<div style={{ position: "relative", width: "100%" }}>
<textarea
placeholder="Address"
type="text"
onChange={this.addressChangeHandler}
/>
<label>Address</label>
<FontAwesomeIcon
icon={["fas", "check-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="#6DB65B"
/>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="red"
/>
</div>
</Aux>
) : (
<React.Fragment>
<div style={{ position: "relative", width: "100%" }}>
<input
placeholder="Name"
type="text"
onChange={this.nameChangeHandler}
/>
<label>Name</label>
<FontAwesomeIcon
icon={["fas", "check-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="#6DB65B"
/>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 0"
}}
size="2x"
color="red"
/>
</div>
<div style={{ position: "relative", width: "100%" }}>
<select onChange={this.handleChange}>
{!this.state.data.tableNo ? (
<option default value="">
Select Table
</option>
) : null}
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
</select>
{!this.state.data.tableNo ? null : (
<label style={{ opacity: "1", transform: "translateY(-450%)" }}>
Table No
</label>
)}
<FontAwesomeIcon
icon={["fas", "check-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 1%"
}}
size="2x"
color="#6DB65B"
/>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
style={{
display: "inlineBlock",
position: "absolute",
right: "0",
margin: "3% 0 0 1%"
}}
size="2x"
color="red"
/>
</div>
</React.Fragment>
)}
<div style={{ position: "relative", width: "100%" }}>
<div
style={{
display: "inline-block",
width: "81%",
display: "flex",
justifyContent: "start",
position: "relative"
}}
>
<label
style={{
color: "#333333",
fontWeight: "600",
display: "inline-block"
}}
>
<input
type="checkbox"
name="react-tips"
onChange={this.handleOptionChange}
style={{
marginRight: ".5rem",
display: "inline-block",
width: "inherit"
}}
/>
Cash On Delivery
</label>
</div>
</div>
<div style={{ position: "relative", width: "100%" }}>
<div
style={{
display: "inline-block",
width: "81%",
display: "flex",
justifyContent: "start",
position: "relative"
}}
>
<label
style={{
color: "#333333",
fontWeight: "600",
display: "inline-block"
}}
>
(
<img
src={bkash}
style={{ width: "60px", display: "inlineBlock" }}
></img>{" "}
<span>will be implemented later</span>
)
</label>
</div>
</div>
<div>
<input type="submit" value="Order" />
</div>
</form>
);
}
}
export default ContactData;
<file_sep>/src/store/burgerReducer.js
const prices = {
Salad: 5,
Meat: 50,
Cheese: 40,
Chicken: 30
};
const initialState = {
ingredients: {
Salad: 0,
Meat: 0,
Cheese: 0,
Chicken: 0
},
totalPrice: 10,
orders: []
};
const burgerReducer = (state = initialState, action) => {
switch (action.type) {
case "ADD_ITEM": {
return {
...state,
ingredients: {
...state.ingredients,
[action.itemName]: state.ingredients[action.itemName] + 1
},
totalPrice: state.totalPrice + prices[action.itemName],
orders: state.orders
};
}
case "REMOVE_ITEM": {
return {
...state,
ingredients: {
...state.ingredients,
[action.itemName]:
state.ingredients[action.itemName] !== 0
? state.ingredients[action.itemName] - 1
: false
},
totalPrice: state.totalPrice - prices[action.itemName],
orders: state.orders
};
}
case "INITIAL_STATE": {
return {
...state,
ingredients: initialState.ingredients,
totalPrice: initialState.totalPrice,
orders: state.orders
};
}
case "GET_ALL_ORDERS": {
return {
...state,
...initialState,
orders: action.items
};
}
case "CLEAR_ORDER": {
return {
...state,
...initialState,
orders: []
};
}
case "CANCEL_ORDER": {
return {
...state,
orders: state.orders.filter(item => item.id !== action.id)
};
}
default:
return state;
}
};
export default burgerReducer;
<file_sep>/src/store/adminReducer.js
const initialState = {
allBurgers: [],
price: {},
itemsInTheCart: {},
totalItemsInTheCart: 0,
addedToCartItmsInfo: [],
orderType: "",
customizedOrder: false,
normalOrder: false
};
const adminReducer = (state = initialState, action) => {
switch (action.type) {
case "FETCH_BURGERS_FROM_ADMIN": {
return {
...state,
allBurgers: [...action.allBurgers],
price: action.prices
};
}
case "ADD ITEM TO CART": {
let numberOfItems;
if (state.itemsInTheCart[action.name]) {
numberOfItems = Number(state.itemsInTheCart[action.name]) + 1;
} else {
numberOfItems = 1;
}
const itemsInTheCart = {
...state.itemsInTheCart,
[action.name]: numberOfItems
};
let totalItems = 0;
for (let itm in itemsInTheCart) {
totalItems = totalItems + itemsInTheCart[itm];
}
return {
...state,
itemsInTheCart: itemsInTheCart,
totalItemsInTheCart: totalItems
};
}
case "ADD QUANTITY TO CART": {
let numberOfItems;
if (state.itemsInTheCart[action.name]) {
numberOfItems =
state.itemsInTheCart[action.name] + Number(action.quantity);
} else {
numberOfItems = action.quantity;
}
const itemsInTheCart = {
...state.itemsInTheCart,
[action.name]: numberOfItems
};
let totalItems = 0;
for (let itm in itemsInTheCart) {
totalItems = totalItems + itemsInTheCart[itm];
}
return {
...state,
itemsInTheCart: itemsInTheCart,
totalItemsInTheCart: totalItems
};
}
case "REMOVE ITEM FROM CART": {
let numberOfItems;
if (state.itemsInTheCart[action.name]) {
numberOfItems = state.itemsInTheCart[action.name] - 1;
}
const itemsInTheCart = {
...state.itemsInTheCart,
[action.name]: numberOfItems > 0 ? numberOfItems : 0
};
let totalItems = 0;
for (let itm in itemsInTheCart) {
totalItems = totalItems + itemsInTheCart[itm];
}
return {
...state,
itemsInTheCart: itemsInTheCart,
totalItemsInTheCart: totalItems ? totalItems : 0
};
}
case "ADDED TO CART ITEMS INFO": {
let items = [];
for (let itm in state.itemsInTheCart) {
items.push({
name: itm,
quantity: state.itemsInTheCart[itm],
price: Number(state.price[itm]) * state.itemsInTheCart[itm]
});
}
return {
...state,
addedToCartItmsInfo: items
};
}
case "DELETE ITEM FROM CART": {
let numberOfItems;
numberOfItems = state.itemsInTheCart[action.name] * 0;
const itemsInTheCart = {
...state.itemsInTheCart,
[action.name]: numberOfItems > 0 ? numberOfItems : 0
};
let totalItems = 0;
for (let itm in itemsInTheCart) {
totalItems = totalItems + itemsInTheCart[itm];
}
return {
...state,
addedToCartItmsInfo: state.addedToCartItmsInfo.filter(
itm => itm.name !== action.name
),
itemsInTheCart: {
...state.itemsInTheCart,
[action.name]: numberOfItems
},
totalItemsInTheCart: totalItems ? totalItems : 0
};
}
case "CUSTOMIZED ORDER": {
return {
...state,
orderType: action.value
};
}
case "ORDER FROM CUSTOMIZED SECTION": {
return {
...state,
customizedOrder: action.value
};
}
case "NORMAL ORDER": {
return {
...state,
normalOrder: action.value
};
}
case "SET INITIAL ADMIN STATE": {
console.log("admin");
return {
...state,
price: {},
itemsInTheCart: {},
totalItemsInTheCart: 0,
addedToCartItmsInfo: [],
orderType: "",
customizedOrder: false,
normalOrder: false
};
}
case "SET AFTER ORDER": {
return {
...state,
itemsInTheCart: {},
totalItemsInTheCart: 0,
addedToCartItmsInfo: [],
orderType: "",
customizedOrder: false,
normalOrder: false
};
}
default:
return state;
}
};
export default adminReducer;
<file_sep>/src/components/UI/Backdrop/Backdrop.module.scss
.Backdrop{
position: fixed;
top:0;
left:0;
height: 100%;
background: rgba(0,0,0,.8);
z-index:400;
width:100%;
}<file_sep>/src/store/Actions/get_All_Orders.js
export const getAllOrders = id => {
return dispatch => {
// var starCountRef = firebase.database().ref(`orders/${id}`);
// starCountRef.once('value',(snapshot)=>{
// let containerOrders=[];
// for(let order in snapshot.val()){
// containerOrders.push({id:order,...snapshot.val()[order]})
// }
// dispatch({type:'GET_ALL_ORDERS',items:containerOrders})
// })
fetch(`https://testing-bc79f.firebaseio.com/orders/${id}.json`)
.then(res => res.json())
.then(data => {
let containerOrders = [];
for (let key in data) {
const info = {
id: key,
...data[key]
};
containerOrders.push(info);
}
containerOrders.sort((a, b) =>
a.cancelOrderTime < b.cancelOrderTime ? 1 : -1
);
dispatch({ type: "GET_ALL_ORDERS", items: containerOrders });
});
};
};
<file_sep>/src/components/Burger/Burger.js
import React, { Component } from "react";
import classes from "./Burger.module.scss";
import stockout from "./stockout.jpg";
class burger extends Component {
link = () => {
const { url, name, calories, id, price, description, status } = this.props;
const data = { url, name, calories, id, price, description };
if (status !== "stockOut") {
this.props.history.push({
pathname: `/burger/${this.props.id}`,
state: { ...data }
});
}
};
render() {
const { url, name, calories, id, price, status } = this.props;
let allClass = [];
{
status !== "stockOut"
? allClass.push(classes.burgerImage)
: allClass.push(classes.notHover);
}
return (
<div
className={classes.burger}
onClick={this.link}
style={{ cursor: "pointer" }}
>
<img src={url} className={allClass.join("")} />
{status === "new" ? <h3 className={classes.new}>New</h3> : null}
{status === "stockOut" ? (
<img src={stockout} className={classes.stockOut} />
) : null}
<h2 className={classes.burgerName}>{name}</h2>
<h2 className={classes.calorie}>
{calories} <strong>Cal</strong>
</h2>
<h3 className={classes.price}>price : {price} tk</h3>
</div>
);
}
}
export default burger;
<file_sep>/src/containers/SpecificBurger/SpecificBurger.js
import React, { Component } from "react";
import { connect } from "react-redux";
import classes from "./SpecificBurger.module.scss";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import CartModal from "../../components/CartModal/CartModal";
import { history } from "../../index";
class SpecificBurger extends Component {
state = {
toggle: false,
name: "",
price: "",
calories: "",
description: "",
url: "",
plusSign: false,
quantity: null
};
componentWillMount = () => {
const {
name,
price,
calories,
description,
url
} = this.props.history.location.state;
this.setState({ name, price, calories, description, url });
};
addToCart = name => {
this.props.dispatch({ type: "ADD ITEM TO CART", name: name });
this.props.dispatch({ type: "ADDED TO CART ITEMS INFO" });
this.setState({ plusSign: true });
setTimeout(() => {
this.setState({ plusSign: false });
}, 300);
};
removeFromCart = name => {
this.props.dispatch({ type: "REMOVE ITEM FROM CART", name: name });
this.props.dispatch({ type: "ADDED TO CART ITEMS INFO" });
};
deleteItemFromCart = name => {
this.props.dispatch({ type: "DELETE ITEM FROM CART", name });
};
handleContinue = () => {
if (!!this.props.id) {
history.push("/checkout");
this.props.dispatch({ type: "NORMAL ORDER", value: true });
this.props.dispatch({
type: "ORDER FROM CUSTOMIZED SECTION",
value: false
});
} else {
history.push("/signin");
this.props.dispatch({ type: "NORMAL ORDER", value: true });
this.props.dispatch({
type: "ORDER FROM CUSTOMIZED SECTION",
value: false
});
}
};
cartToggle = () => {
const prev = this.state.toggle;
this.setState({ toggle: !prev });
};
closeCart = () => {
console.log("click");
const prev = this.state.toggle;
this.setState({ toggle: !prev });
};
quantity = e => {
let getQuantity = e.target.value;
this.setState({ quantity: Number(getQuantity) });
};
enterQuantity = () => {
this.props.dispatch({
type: "ADD QUANTITY TO CART",
quantity: this.state.quantity,
name: this.state.name
});
this.props.dispatch({ type: "ADDED TO CART ITEMS INFO" });
};
render() {
console.log(this.state);
let sign;
if (this.state.plusSign) {
sign = "check";
} else {
sign = "plus-circle";
}
const { name, price, calories, description, url } = this.state;
return (
<div className={classes.container}>
<div className={classes.descriptionContainer}>
<div className={classes.row}>
<div className={classes.description}>
<h1 className={classes.name}>{name}</h1>
<p className={classes.statement}>{description}</p>
<h3 className={classes.calorie}>{calories} cal</h3>
<div className={classes.imgContainer}>
<img src={url} className={classes.burgerImg} />
</div>
{Object.keys(this.props.priceList).length > 0 ? (
<div>
<button
className={classes.addToMeal}
onClick={() => this.addToCart(name)}
>
Order
<FontAwesomeIcon
icon={["fas", sign]}
transform="right-5 grow-2.5"
/>
</button>
<span style={{ display: "inlineBlock", fontSize: "18px" }}>
<label>
Enter Quantity :{" "}
<input
style={{ padding: "5px" }}
onChange={this.quantity}
/>
<FontAwesomeIcon
icon={["fas", "plus-circle"]}
transform="right-5 grow-2.5"
onClick={this.enterQuantity}
/>
</label>
</span>
</div>
) : null}
</div>
</div>
</div>
<div className={classes.checkout}>
{this.props.totalItemsInTheCart > 0 ? (
<div className={classes.row}>
<h1 className={classes.meal}>My meal</h1>
<div className={classes.itemsContainer}>
{this.props.allItemsInTheCart.map(
({ name, quantity, price }, index) => {
return (
<div className={classes.items} key={index}>
<div className={classes.itemDes}>
<FontAwesomeIcon
icon={["fas", "times-circle"]}
color="red"
transform="down-2"
onClick={() => {
this.deleteItemFromCart(name);
}}
className={classes.removeBtn}
/>
<p>
{quantity} x {name}
</p>
<FontAwesomeIcon
icon={["fas", "plus-circle"]}
transform="down-2"
color="green"
onClick={() => this.addToCart(name, quantity)}
className={classes.increaseBtn}
/>
<FontAwesomeIcon
icon={["fas", "minus-circle"]}
color="red"
transform="down-2"
onClick={() => this.removeFromCart(name)}
className={classes.decreaseBtn}
/>
</div>
<div className={classes.itemPrice}>
<strong>{price} BDT</strong>
</div>
</div>
);
}
)}
</div>
<h2 style={{ margin: "3rem 0 .1rem 0" }}>
Total Price :{" "}
{this.props.allItemsInTheCart.reduce((total, itm) => {
return total + itm.price;
}, 0)}
</h2>
<button
className={classes.continue}
onClick={this.handleContinue}
>
Continue
</button>
</div>
) : (
<h3 style={{ textAlign: "center" }}>
Please add burgers in the cart
</h3>
)}
</div>
<CartModal on={this.state.toggle} closeCart={this.closeCart} />
{this.state.toggle ? (
<button className={classes.viewCart} onClick={this.handleContinue}>
Checkout
</button>
) : (
<button className={classes.viewCart} onClick={this.cartToggle}>
View Cart
</button>
)}
</div>
);
}
}
const mapStateToProps = (state, props) => {
let itm = state.adminReducer.allBurgers.find(
itm => props.match.params.id == itm.id
);
return {
all_burgers: state.adminReducer.allBurgers,
allItemsInTheCart: state.adminReducer.addedToCartItmsInfo,
speceficItem: itm,
totalItemsInTheCart: state.adminReducer.totalItemsInTheCart,
priceList: state.adminReducer.price,
id: state.authReducer.id
};
};
export default connect(mapStateToProps)(SpecificBurger);
| 4e3f7740a0b18327f82811477b2e024ec941cae6 | [
"SCSS",
"JavaScript"
] | 8 | SCSS | tanveerctg/latest_new_burger_app | aa4e0f31d1e1107a8d9ed153a122b3c2393a892e | c2efb7d69479420d6a95b39f700807467088b490 |
refs/heads/master | <file_sep>#include "al2o3_platform/platform.h"
#include "tiny_imageformat/tinyimageformat_base.h"
#include "tiny_ktx/tinyktx.h"
<file_sep>#include "tiny_ktx/tinyktx.h"
#include "al2o3_platform/platform.h"
#include "al2o3_memory/memory.h"
#include "al2o3_catch2/catch2.hpp"
#include "al2o3_vfile/vfile.hpp"
#include "al2o3_stb/stb_image.h"
#include "al2o3_os/filesystem.h"
static const char* gBasePath = "input/testimages";
#define SET_PATH() char existCurDir[1024]; \
Os_GetCurrentDir(existCurDir, sizeof(existCurDir)); \
char path[2048]; \
strcpy(path, existCurDir); \
strcat(path, gBasePath); \
Os_SetCurrentDir(path)
#define RESTORE_PATH() Os_SetCurrentDir(existCurDir)
static void tinyktxCallbackError(void *user, char const *msg) {
LOGERROR("Tiny_Ktx ERROR: %s", msg);
}
static void *tinyktxCallbackAlloc(void *user, size_t size) {
return MEMORY_MALLOC(size);
}
static void tinyktxCallbackFree(void *user, void *data) {
MEMORY_FREE(data);
}
static size_t tinyktxCallbackRead(void *user, void* data, size_t size) {
auto handle = (VFile_Handle) user;
return VFile_Read(handle, data, size);
}
static bool tinyktxCallbackSeek(void *user, int64_t offset) {
auto handle = (VFile_Handle) user;
return VFile_Seek(handle, offset, VFile_SD_Begin);
}
static int64_t tinyktxCallbackTell(void *user) {
auto handle = (VFile_Handle) user;
return VFile_Tell(handle);
}
static int stbIoCallbackRead(void *user, char *data, int size) {
auto handle = (VFile_Handle) user;
return (int) VFile_Read(handle, data, size);
}
static void stbIoCallbackSkip(void *user, int n) {
auto handle = (VFile_Handle) user;
VFile_Seek(handle, n, VFile_SD_Current);
}
static int stbIoCallbackEof(void *user) {
auto handle = (VFile_Handle) user;
return VFile_IsEOF(handle);
}
TEST_CASE("Check Files", "[TinyKtx Loader]") {
SET_PATH();
#define CHK_FILE_EXISTS(filename) \
{ VFile::ScopedFile reffile = VFile::File::FromFile(filename, Os_FM_ReadBinary); \
if(!reffile) { \
LOGERROR("This must run in the directory input/testimages/ that can be got from http://github/DeanoC/taylor_imagetests"); \
REQUIRE(reffile); \
} }
CHK_FILE_EXISTS("rgb-reference.ktx");
CHK_FILE_EXISTS("rgb.ppm");
CHK_FILE_EXISTS("luminance-reference-metadata.ktx");
CHK_FILE_EXISTS("luminance.pgm");
CHK_FILE_EXISTS("level0.ppm");
CHK_FILE_EXISTS("level1.ppm");
CHK_FILE_EXISTS("level2.ppm");
CHK_FILE_EXISTS("level3.ppm");
CHK_FILE_EXISTS("level4.ppm");
CHK_FILE_EXISTS("level5.ppm");
CHK_FILE_EXISTS("level6.ppm");
#undef CHK_FILE_EXISTS
}
TEST_CASE("TinyKtx Create/Destroy Context", "[TinyKtx Loader]") {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
VFile::ScopedFile file = VFile::File::FromFile("rgb-reference.ktx", Os_FM_ReadBinary);
if(!file) {
LOGERROR("This must run in the directory input/testimages/ that can be got from http://github/DeanoC/taylor_imagetests");
REQUIRE(file);
}
auto ctx = TinyKtx_CreateContext(&callbacks, (void*)file.owned);
REQUIRE(ctx);
TinyKtx_DestroyContext(ctx);
}
TEST_CASE("TinyKtx readheader & dimensions", "[TinyKtx Loader]") {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
VFile::ScopedFile file = VFile::File::FromFile("rgb-reference.ktx", Os_FM_ReadBinary);
REQUIRE(file);
auto ctx = TinyKtx_CreateContext(&callbacks, (void*)file.owned);
REQUIRE(TinyKtx_ReadHeader(ctx));
auto w = TinyKtx_Width(ctx);
auto h = TinyKtx_Height(ctx);
auto d = TinyKtx_Depth(ctx);
auto s = TinyKtx_ArraySlices(ctx);
uint32_t wd, hd, dd, sd;
TinyKtx_Dimensions(ctx, &wd, &hd, &dd, &sd);
REQUIRE(w == wd);
REQUIRE(h == hd);
REQUIRE(d == dd);
REQUIRE(s == sd);
REQUIRE(w == 128);
REQUIRE(h == 128);
REQUIRE(d == 0);
REQUIRE(s == 0);
REQUIRE(TinyKtx_NumberOfMipmaps(ctx) == 1);
TinyKtx_DestroyContext(ctx);
}
static bool CmpFlipped( uint32_t w,
uint32_t h,
uint8_t pixByte,
uint32_t srcStride,
uint32_t dstStride,
uint8_t const* src,
uint8_t const* dst) {
dst = dst + ((h-1) * dstStride);
for (auto i = 0u; i < h; ++i) {
uint8_t const *srcBackup = src;
uint8_t const *dstBackup = dst;
for (auto j = 0u; j < w; ++j) {
for(auto p = 0u; p < pixByte;++p) {
if(src[p] != dst[p]) return false;
}
src += pixByte;
dst += pixByte;
}
src = srcBackup + srcStride;
dst = dstBackup - dstStride;
}
return true;
}
static bool CmpSame( uint32_t w,
uint32_t h,
uint8_t pixByte,
uint32_t srcStride,
uint32_t dstStride,
uint8_t const* src,
uint8_t const* dst) {
for (auto i = 0u; i < h; ++i) {
uint8_t const *srcBackup = src;
uint8_t const *dstBackup = dst;
for (auto j = 0u; j < w; ++j) {
for(auto p = 0u; p < pixByte;++p) {
if(src[p] != dst[p]) return false;
}
src += pixByte;
dst += pixByte;
}
src = srcBackup + srcStride;
dst = dstBackup + dstStride;
}
return true;
}
TEST_CASE("TinyKtx rgb-reference okay", "[TinyKtx Loader]") {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
stbi_io_callbacks stbi_callbacks{
&stbIoCallbackRead,
&stbIoCallbackSkip,
&stbIoCallbackEof
};
VFile::ScopedFile file = VFile::File::FromFile("rgb-reference.ktx", Os_FM_ReadBinary);
VFile::ScopedFile reffile = VFile::File::FromFile("rgb.ppm", Os_FM_ReadBinary);
REQUIRE(file);
REQUIRE(reffile);
auto ctx = TinyKtx_CreateContext(&callbacks, (void*)file.owned);
REQUIRE(TinyKtx_ReadHeader(ctx));
size_t origin = VFile_Tell(reffile);
int w = 0, h = 0, cmp = 0;
stbi_info_from_callbacks(&stbi_callbacks, (void*)reffile.owned, &w, &h, &cmp);
REQUIRE(w == TinyKtx_Width(ctx));
REQUIRE(h == TinyKtx_Height(ctx));
REQUIRE(TinyKtx_GetFormat(ctx) == TKTX_R8G8B8_UNORM);
VFile_Seek(reffile, origin, VFile_SD_Begin);
stbi_uc *refdata = stbi_load_from_callbacks(&stbi_callbacks, (void*)reffile.owned, &w, &h, &cmp, cmp);
REQUIRE(refdata);
auto ktxdata = (uint8_t const*)TinyKtx_ImageRawData(ctx, 0);
REQUIRE(CmpFlipped(w, h, 3, w * cmp, w * cmp, refdata, ktxdata));
MEMORY_FREE((void*)refdata);
TinyKtx_DestroyContext(ctx);
}
TEST_CASE("TinyKtx luminance-reference okay", "[TinyKtx Loader]") {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
stbi_io_callbacks stbi_callbacks{
&stbIoCallbackRead,
&stbIoCallbackSkip,
&stbIoCallbackEof
};
VFile::ScopedFile file = VFile::File::FromFile("luminance-reference-metadata.ktx", Os_FM_ReadBinary);
VFile::ScopedFile reffile = VFile::File::FromFile("luminance.pgm", Os_FM_ReadBinary);
REQUIRE(file);
REQUIRE(reffile);
auto ctx = TinyKtx_CreateContext(&callbacks, (void*)file.owned);
REQUIRE(TinyKtx_ReadHeader(ctx));
size_t origin = VFile_Tell(reffile);
int w = 0, h = 0, cmp = 0;
stbi_info_from_callbacks(&stbi_callbacks, (void*)reffile.owned, &w, &h, &cmp);
REQUIRE(w == TinyKtx_Width(ctx));
REQUIRE(h == TinyKtx_Height(ctx));
REQUIRE(TinyKtx_GetFormat(ctx) == TKTX_R8_UNORM);
REQUIRE(cmp == 1);
VFile_Seek(reffile, origin, VFile_SD_Begin);
stbi_uc *refdata = stbi_load_from_callbacks(&stbi_callbacks, (void*)reffile.owned, &w, &h, &cmp, cmp);
REQUIRE(refdata);
auto ktxdata = (uint8_t const*)TinyKtx_ImageRawData(ctx, 0);
REQUIRE(CmpSame(w, h, 1, w * cmp, w * cmp, refdata, ktxdata));
MEMORY_FREE((void*)refdata);
TinyKtx_DestroyContext(ctx);
}
TEST_CASE("TinyKtx git hub #2 (image size before image raw data broken) fix test", "[TinyKtx Loader]") {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
stbi_io_callbacks stbi_callbacks{
&stbIoCallbackRead,
&stbIoCallbackSkip,
&stbIoCallbackEof
};
VFile::ScopedFile file = VFile::File::FromFile("rgb-reference.ktx", Os_FM_ReadBinary);
VFile::ScopedFile reffile = VFile::File::FromFile("rgb.ppm", Os_FM_ReadBinary);
REQUIRE(file);
REQUIRE(reffile);
auto ctx = TinyKtx_CreateContext(&callbacks, (void*)file.owned);
REQUIRE(TinyKtx_ReadHeader(ctx));
size_t origin = VFile_Tell(reffile);
int w = 0, h = 0, cmp = 0;
stbi_info_from_callbacks(&stbi_callbacks, (void*)reffile.owned, &w, &h, &cmp);
uint64_t memoryRequirement = sizeof(stbi_uc) * w * h * cmp;
// perform an image size op; this shouldn't break the later image raw data if this is fixed
REQUIRE(memoryRequirement == TinyKtx_ImageSize(ctx, 0));
VFile_Seek(reffile, origin, VFile_SD_Begin);
stbi_uc const *refdata = stbi_load_from_callbacks(&stbi_callbacks, (void*)reffile.owned, &w, &h, &cmp, cmp);
REQUIRE(refdata);
auto ktxdata = (uint8_t const*)TinyKtx_ImageRawData(ctx, 0);
REQUIRE(CmpFlipped(w, h, 3, w * cmp, w * cmp, refdata, ktxdata));
MEMORY_FREE((void*)refdata);
TinyKtx_DestroyContext(ctx);
}
TEST_CASE("TinyKtx mipmap reference check", "[TinyKtx Loader]") {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
stbi_io_callbacks stbi_callbacks{
&stbIoCallbackRead,
&stbIoCallbackSkip,
&stbIoCallbackEof
};
VFile::ScopedFile file = VFile::File::FromFile("rgb-mipmap-reference.ktx", Os_FM_ReadBinary);
VFile::ScopedFile reffile[7] {
VFile::File::FromFile("level0.ppm", Os_FM_ReadBinary),
VFile::File::FromFile("level1.ppm", Os_FM_ReadBinary),
VFile::File::FromFile("level2.ppm", Os_FM_ReadBinary),
VFile::File::FromFile("level3.ppm", Os_FM_ReadBinary),
VFile::File::FromFile("level4.ppm", Os_FM_ReadBinary),
VFile::File::FromFile("level5.ppm", Os_FM_ReadBinary),
VFile::File::FromFile("level6.ppm", Os_FM_ReadBinary),
};
REQUIRE(file);
auto ctx = TinyKtx_CreateContext(&callbacks, (void*)file.owned);
REQUIRE(TinyKtx_ReadHeader(ctx));
REQUIRE(TinyKtx_NumberOfMipmaps(ctx) == 7);
for (auto i = 0u; i < 7; ++i) {
size_t origin = VFile_Tell(reffile[i]);
int w = 0, h = 0, cmp = 0;
stbi_info_from_callbacks(&stbi_callbacks, (void*)reffile[i].owned, &w, &h, &cmp);
VFile_Seek(reffile[i], origin, VFile_SD_Begin);
stbi_uc const *refdata = stbi_load_from_callbacks(&stbi_callbacks, (void*)reffile[i].owned, &w, &h, &cmp, cmp);
REQUIRE(refdata);
uint32_t const srcStride = w * cmp;
uint32_t dstStride = srcStride;
if( i < 5) {
uint64_t const memoryRequirement = sizeof(stbi_uc) * w * h * cmp;
REQUIRE(memoryRequirement == TinyKtx_ImageSize(ctx, i));
REQUIRE(!TinyKtx_IsMipMapLevelUnpacked(ctx,i));
} else {
REQUIRE(TinyKtx_IsMipMapLevelUnpacked(ctx,i));
dstStride = TinyKtx_UnpackedRowStride(ctx,i);
if(i == 5) {
REQUIRE(dstStride == 8);
} else if(i == 6) {
REQUIRE(dstStride == 4);
}
}
auto ktxdata = (uint8_t const*)TinyKtx_ImageRawData(ctx, i);
REQUIRE(CmpFlipped(w, h, 3, srcStride, dstStride, refdata, ktxdata));
MEMORY_FREE((void*)refdata);
}
TinyKtx_DestroyContext(ctx);
}<file_sep># tiny_ktx
Small C based KTX texture loader (inspired by syoyo tiny libraries)
KTX textures can handle
* Almost any format of texture data
* 1D, 2D, 3D and cubemaps textures
* Texture arrays
* Mipmaps
* Key value pairs for custom data extensions
It's an efficient open format for almost any realtime texture data you could want.
## What it does?
* Loads Khronos KTX textures
* Saves Khronos KTX textures
* Optionally provides the format in either native GL/KTX style or Vulkan/Dx12/Metal style
* Optionally provides the GL defines required to read KTX files without GL
tiny_ktx is a very low level API; as such it only handles these parts and doesn't process the data in any way.
## Requirements
None except a C compiler (TODO: test which version is required, C89 or C99)
By default uses 4 std lib headers
* stdint.h -for uint32_t and int64_t
* stdbool.h - for bool
* stddef.h - for size_t
* string.h - for memcpy
However, if the types/functions are provided yourself you can opt out
of these being included via *TINYKTX_HAVE_UINTXX_T* etc., as sketched below.
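For example, a minimal opt-out might look like this (a sketch; the typedefs are hypothetical placeholders for whatever your platform actually provides):

```c
#define TINYKTX_HAVE_UINTXX_T
typedef unsigned int uint32_t; /* assumption: 32-bit unsigned int on this platform */
typedef long long int64_t;     /* assumption: 64-bit long long on this platform */
#include "tiny_ktx/tinyktx.h"
```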
## How to build
* include tinyktx.h from include/tiny_ktx in your project
* in 1 file in your project define *TINYKTX_IMPLEMENTATION* before including tinyktx.h (see the snippet below)
If using CMake and you want a library version, just add this project using add_subdirectory
and add tiny_ktx as a dependency.
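The implementation unit in this repo does exactly that:

```c
#define TINYKTX_IMPLEMENTATION
#include "tiny_ktx/tinyktx.h"
```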
## Handling KTX format
KTX files are based on OpenGL, which has evolved a fairly complex pixel format system.
Whilst this may be useful if you are importing into a GL application, for others it's quite hard to convert.
An optional part of TinyKtx (default is on) will convert these into a more Vulkan/Dx12/Metal style format.
Rather than the multiple uint32_t types KTX stores format in, TinyKtx provides a single large enum.
*TinyKtx_GetFormatGL* will give you the format directly as stored in the KTX file
*TinyKtx_GetFormat* will provide a single value converted from the KTX GL provided type.
If TinyKtx_GetFormat can't convert, it will return *TKTX_UNDEFINED*.
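A typical pattern is to try the converted format first and fall back to the raw GL values (a sketch; the v1 GL query signature here is assumed to match the one declared in this repo's v2 header):

```c
TinyKtx_Format fmt = TinyKtx_GetFormat(ctx);
if (fmt == TKTX_UNDEFINED) {
	uint32_t glformat, gltype, glinternalformat, typesize, glbaseinternalformat;
	if (TinyKtx_GetFormatGL(ctx, &glformat, &gltype, &glinternalformat,
	                        &typesize, &glbaseinternalformat)) {
		/* interpret the raw GL enums yourself */
	}
}
```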
## How to load a KTX
Create a context using *TinyKtx_CreateContext*, passing in callbacks for
* optional error report
* alloc
* free
* read
* seek
* tell
All are provided a void* user data argument, for a file handle etc.
```
Read the header (TinyKtx_ReadHeader).
The read cursor (tell) should be at the start of the KTX data
Query the dimension, format etc. (TinyKtx_Width etc.)
For each mipmap level in the file (TinyKtx_NumberOfMipmaps)
get the mipmap size (TinyKtx_ImageSize)
get the mipmap data (TinyKtx_ImageRawData)
```
Load snippet
```c
static void tinyktxCallbackError(void *user, char const *msg) {
LOGERROR("Tiny_Ktx ERROR: %s", msg);
}
static void *tinyktxCallbackAlloc(void *user, size_t size) {
return MEMORY_MALLOC(size);
}
static void tinyktxCallbackFree(void *user, void *data) {
MEMORY_FREE(data);
}
static size_t tinyktxCallbackRead(void *user, void* data, size_t size) {
auto handle = (VFile_Handle) user;
return VFile_Read(handle, data, size);
}
static bool tinyktxCallbackSeek(void *user, int64_t offset) {
auto handle = (VFile_Handle) user;
return VFile_Seek(handle, offset, VFile_SD_Begin);
}
static int64_t tinyktxCallbackTell(void *user) {
auto handle = (VFile_Handle) user;
return VFile_Tell(handle);
}
AL2O3_EXTERN_C Image_ImageHeader const *Image_LoadKTX(VFile_Handle handle) {
TinyKtx_Callbacks callbacks {
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
tinyktxCallbackRead,
&tinyktxCallbackSeek,
&tinyktxCallbackTell
};
auto ctx = TinyKtx_CreateContext( &callbacks, handle);
TinyKtx_ReadHeader(ctx);
uint32_t w = TinyKtx_Width(ctx);
uint32_t h = TinyKtx_Height(ctx);
uint32_t d = TinyKtx_Depth(ctx);
uint32_t s = TinyKtx_ArraySlices(ctx);
ImageFormat fmt = ImageFormatToTinyKtxFormat(TinyKtx_GetFormat(ctx));
if(fmt == ImageFormat_UNDEFINED) {
TinyKtx_DestroyContext(ctx);
return nullptr;
}
Image_ImageHeader const* topImage = nullptr;
Image_ImageHeader const* prevImage = nullptr;
for(auto i = 0u; i < TinyKtx_NumberOfMipmaps(ctx);++i) {
auto image = Image_CreateNoClear(w, h, d, s, fmt);
if(i == 0) topImage = image;
if(Image_ByteCountOf(image) != TinyKtx_ImageSize(ctx, i)) {
LOGERROR("KTX file %s mipmap %i size error", VFile_GetName(handle), i);
Image_Destroy(topImage);
TinyKtx_DestroyContext(ctx);
return nullptr;
}
memcpy(Image_RawDataPtr(image), TinyKtx_ImageRawData(ctx, i), Image_ByteCountOf(image));
if(prevImage) {
auto p = (Image_ImageHeader *)prevImage;
p->nextType = Image_NextType::Image_IT_MipMaps;
p->nextImage = image;
}
if(w > 1) w = w / 2;
if(h > 1) h = h / 2;
if(d > 1) d = d / 2;
prevImage = image;
}
TinyKtx_DestroyContext(ctx);
return topImage;
}
```
## How to save a KTX
Saving doesn't need a context, just a *TinyKtx_WriteCallbacks* with
* error reporting
* alloc (not currently used)
* free (not currently used)
* write
```
TinyKtx_WriteImage or TinyKtx_WriteImageGL are the only API entry points for saving a KTX file.
Provide them with the format (in either style), the dimensions and whether it's a cube map or not.
Pass the number of mipmaps and arrays filled with the size of each mipmap image and a pointer to the data.
```
Save snippet
```c
static void tinyktxCallbackError(void *user, char const *msg) {
LOGERROR("Tiny_Ktx ERROR: %s", msg);
}
static void *tinyktxCallbackAlloc(void *user, size_t size) {
return MEMORY_MALLOC(size);
}
static void tinyktxCallbackFree(void *user, void *data) {
MEMORY_FREE(data);
}
static void tinyktxCallbackWrite(void *user, void const *data, size_t size) {
auto handle = (VFile_Handle) user;
VFile_Write(handle, data, size);
}
AL2O3_EXTERN_C bool Image_SaveKTX(Image_ImageHeader *image, VFile_Handle handle) {
using namespace Image;
TinyKtx_WriteCallbacks callback{
&tinyktxCallbackError,
&tinyktxCallbackAlloc,
&tinyktxCallbackFree,
&tinyktxCallbackWrite,
};
TinyKtx_Format fmt = ImageFormatToTinyKtxFormat(image->format);
if(fmt == TKTX_UNDEFINED) return false;
uint32_t numMipmaps = (image->nextType == Image_NextType::Image_IT_None) ? 1 : (uint32_t)Image_LinkedImageCountOf(image);
uint32_t mipmapsizes[TINYKTX_MAX_MIPMAPLEVELS];
void const* mipmaps[TINYKTX_MAX_MIPMAPLEVELS];
memset(mipmapsizes, 0, sizeof(uint32_t)*TINYKTX_MAX_MIPMAPLEVELS);
memset(mipmaps, 0, sizeof(void const*)*TINYKTX_MAX_MIPMAPLEVELS);
for(size_t i = 0; i < numMipmaps; ++i) {
mipmapsizes[i] = (uint32_t) Image_LinkedImageOf(image, i)->dataSize;
mipmaps[i] = Image_RawDataPtr(Image_LinkedImageOf(image, i));
}
return TinyKtx_WriteImage(&callback,
handle,
image->width,
image->height,
image->depth,
image->slices,
numMipmaps,
fmt,
Image_IsCubemap(image),
mipmapsizes,
mipmaps );
}
```
## Tests
Testing is done using my Taylor scriptable content processor.
[taylor_imagetest - script and data](https://github.com/DeanoC/taylor_imagetests)
[taylor - app that runs the test script](https://github.com/DeanoC/taylor)
## TODO
Lots of validation/tests
Save key data pairs (currently will load them but doesn't write any)
Handle endianness?
## Higher level
tiny_ktx is the low level part of my gfx_imageio and gfx_image libraries.
They handle conversion of data, reading and writing, and load/save of other formats as well.
taylor is a Lua scripted content command line program that uses the above library for processing.
If you want a higher level layer over tiny_ktx, or to see how tiny_ktx is used, see the links below.
The snippets above are from gfx_imageio
[gfx_imageio - higher level import/export image using tiny_ktx (and other formats)](https://github.com/DeanoC/gfx_imageio)
[taylor - lua scripted image processer using gfx_imageio](https://github.com/DeanoC/taylor)
<file_sep>// MIT license see full LICENSE text at end of file
#pragma once
#ifndef TINY_KTX_TINYKTX2_H
#define TINY_KTX_TINYKTX2_H
#ifndef TINYKTX_HAVE_UINTXX_T
#include <stdint.h> // for uint32_t and int64_t
#endif
#ifndef TINYKTX_HAVE_BOOL
#include <stdbool.h> // for bool
#endif
#ifndef TINYKTX_HAVE_SIZE_T
#include <stddef.h> // for size_t
#endif
#ifndef TINYKTX_HAVE_MEMCPY
#include <string.h> // for memcpy
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TinyKtx2_Context *TinyKtx2_ContextHandle;
#define TINYKTX2_MAX_MIPMAPLEVELS 16
typedef void *(*TinyKtx2_AllocFunc)(void *user, size_t size);
typedef void (*TinyKtx2_FreeFunc)(void *user, void *memory);
typedef size_t (*TinyKtx2_ReadFunc)(void *user, void *buffer, size_t byteCount);
typedef bool (*TinyKtx2_SeekFunc)(void *user, int64_t offset);
typedef int64_t (*TinyKtx2_TellFunc)(void *user);
typedef void (*TinyKtx2_ErrorFunc)(void *user, char const *msg);
typedef bool (*TinyKtx2_SuperDecompress)(void* user, void* const sgdData, void const* src, size_t srcSize, void const* dst, size_t dstSize);
typedef struct TinyKtx2_SuperDecompressTableEntry {
uint32_t superId;
TinyKtx2_SuperDecompress decompressor;
} TinyKtx2_SuperDecompressTableEntry;
typedef struct TinyKtx2_Callbacks {
TinyKtx2_ErrorFunc error;
TinyKtx2_AllocFunc alloc;
TinyKtx2_FreeFunc free;
TinyKtx2_ReadFunc read;
TinyKtx2_SeekFunc seek;
TinyKtx2_TellFunc tell;
size_t numSuperDecompressors;
TinyKtx2_SuperDecompressTableEntry const* superDecompressors;
} TinyKtx2_Callbacks;
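/* Example (a sketch, not part of the API): registering a user-supplied zstd
 * super-decompressor via the table above. ZSTD_decompress is the real zstd
 * API call; the wrapper name and wiring here are hypothetical.
 *
 *   static bool myZstdDecompress(void* user, void* const sgdData,
 *                                void const* src, size_t srcSize,
 *                                void const* dst, size_t dstSize) {
 *     return ZSTD_decompress((void*)dst, dstSize, src, srcSize) == dstSize;
 *   }
 *   static TinyKtx2_SuperDecompressTableEntry const kSuperDecompressors[] = {
 *     { TKTX2_SUPERCOMPRESSION_ZSTD, &myZstdDecompress }
 *   };
 *   // then set numSuperDecompressors = 1 and
 *   // superDecompressors = kSuperDecompressors in the TinyKtx2_Callbacks
 *   // passed to TinyKtx2_CreateContext
 */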
TinyKtx2_ContextHandle TinyKtx2_CreateContext(TinyKtx2_Callbacks const *callbacks, void *user);
void TinyKtx2_DestroyContext(TinyKtx2_ContextHandle handle);
// reset lets you reuse the context for another file (saves an alloc/free cycle)
void TinyKtx2_Reset(TinyKtx2_ContextHandle handle);
// call this to read the header file should already be at the start of the KTX data
bool TinyKtx2_ReadHeader(TinyKtx2_ContextHandle handle);
// this is slow linear search. TODO add iterator style reading of key value pairs
bool TinyKtx2_GetValue(TinyKtx2_ContextHandle handle, char const *key, void const **value);
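/* e.g. (a sketch): looking up the standard KTXorientation metadata key
 *   void const *value = NULL;
 *   if (TinyKtx2_GetValue(ctx, "KTXorientation", &value)) { ... }
 */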
bool TinyKtx2_Is1D(TinyKtx2_ContextHandle handle);
bool TinyKtx2_Is2D(TinyKtx2_ContextHandle handle);
bool TinyKtx2_Is3D(TinyKtx2_ContextHandle handle);
bool TinyKtx2_IsCubemap(TinyKtx2_ContextHandle handle);
bool TinyKtx2_IsArray(TinyKtx2_ContextHandle handle);
bool TinyKtx2_Dimensions(TinyKtx2_ContextHandle handle, uint32_t* width, uint32_t* height, uint32_t* depth, uint32_t* slices);
uint32_t TinyKtx2_Width(TinyKtx2_ContextHandle handle);
uint32_t TinyKtx2_Height(TinyKtx2_ContextHandle handle);
uint32_t TinyKtx2_Depth(TinyKtx2_ContextHandle handle);
uint32_t TinyKtx2_ArraySlices(TinyKtx2_ContextHandle handle);
bool TinyKtx2_GetFormatGL(TinyKtx2_ContextHandle handle, uint32_t *glformat, uint32_t *gltype, uint32_t *glinternalformat, uint32_t* typesize, uint32_t* glbaseinternalformat);
bool TinyKtx2_NeedsGenerationOfMipmaps(TinyKtx2_ContextHandle handle);
bool TinyKtx2_NeedsEndianCorrecting(TinyKtx2_ContextHandle handle);
uint32_t TinyKtx2_NumberOfMipmaps(TinyKtx2_ContextHandle handle);
uint32_t TinyKtx2_ImageSize(TinyKtx2_ContextHandle handle, uint32_t mipmaplevel);
bool TinyKtx2_IsMipMapLevelUnpacked(TinyKtx2_ContextHandle handle, uint32_t mipmaplevel);
// this is required to read Unpacked data correctly
uint32_t TinyKtx2_UnpackedRowStride(TinyKtx2_ContextHandle handle, uint32_t mipmaplevel);
// data return by ImageRawData is owned by the context. Don't free it!
void const *TinyKtx2_ImageRawData(TinyKtx2_ContextHandle handle, uint32_t mipmaplevel);
typedef void (*TinyKtx2_WriteFunc)(void *user, void const *buffer, size_t byteCount);
typedef struct TinyKtx2_WriteCallbacks {
TinyKtx2_ErrorFunc error;
TinyKtx2_AllocFunc alloc;
TinyKtx2_FreeFunc free;
TinyKtx2_WriteFunc write;
} TinyKtx2_WriteCallbacks;
bool TinyKtx2_WriteImageGL(TinyKtx2_WriteCallbacks const *callbacks,
void *user,
uint32_t width,
uint32_t height,
uint32_t depth,
uint32_t slices,
uint32_t mipmaplevels,
uint32_t format,
uint32_t internalFormat,
uint32_t baseFormat,
uint32_t type,
uint32_t typeSize,
bool cubemap,
uint32_t const *mipmapsizes,
void const **mipmaps);
// ktx v1 is based on GL (slightly confusing imho) texture format system
// there is format, internal format, type etc.
// we try and expose a more dx12/vulkan/metal style of format
// but obviously we still need the GL data, so bear with me.
// a TinyKTX_Format is the equivalent of the GL/KTX Format and Type
// the API doesn't expose the actual values (which come from GL itself)
// but provide an API call to crack them back into the actual GL values).
// Ktx v2 is based on VkFormat and also DFD, so we now base the
// enumeration values of TinyKtx_Format on the Vkformat values where possible
#ifndef TINYKTX_DEFINED
typedef enum TinyImageFormat_VkFormat {
TIF_VK_FORMAT_UNDEFINED = 0,
TIF_VK_FORMAT_R4G4_UNORM_PACK8 = 1,
TIF_VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
TIF_VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
TIF_VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
TIF_VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
TIF_VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
TIF_VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
TIF_VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
TIF_VK_FORMAT_R8_UNORM = 9,
TIF_VK_FORMAT_R8_SNORM = 10,
TIF_VK_FORMAT_R8_USCALED = 11,
TIF_VK_FORMAT_R8_SSCALED = 12,
TIF_VK_FORMAT_R8_UINT = 13,
TIF_VK_FORMAT_R8_SINT = 14,
TIF_VK_FORMAT_R8_SRGB = 15,
TIF_VK_FORMAT_R8G8_UNORM = 16,
TIF_VK_FORMAT_R8G8_SNORM = 17,
TIF_VK_FORMAT_R8G8_USCALED = 18,
TIF_VK_FORMAT_R8G8_SSCALED = 19,
TIF_VK_FORMAT_R8G8_UINT = 20,
TIF_VK_FORMAT_R8G8_SINT = 21,
TIF_VK_FORMAT_R8G8_SRGB = 22,
TIF_VK_FORMAT_R8G8B8_UNORM = 23,
TIF_VK_FORMAT_R8G8B8_SNORM = 24,
TIF_VK_FORMAT_R8G8B8_USCALED = 25,
TIF_VK_FORMAT_R8G8B8_SSCALED = 26,
TIF_VK_FORMAT_R8G8B8_UINT = 27,
TIF_VK_FORMAT_R8G8B8_SINT = 28,
TIF_VK_FORMAT_R8G8B8_SRGB = 29,
TIF_VK_FORMAT_B8G8R8_UNORM = 30,
TIF_VK_FORMAT_B8G8R8_SNORM = 31,
TIF_VK_FORMAT_B8G8R8_USCALED = 32,
TIF_VK_FORMAT_B8G8R8_SSCALED = 33,
TIF_VK_FORMAT_B8G8R8_UINT = 34,
TIF_VK_FORMAT_B8G8R8_SINT = 35,
TIF_VK_FORMAT_B8G8R8_SRGB = 36,
TIF_VK_FORMAT_R8G8B8A8_UNORM = 37,
TIF_VK_FORMAT_R8G8B8A8_SNORM = 38,
TIF_VK_FORMAT_R8G8B8A8_USCALED = 39,
TIF_VK_FORMAT_R8G8B8A8_SSCALED = 40,
TIF_VK_FORMAT_R8G8B8A8_UINT = 41,
TIF_VK_FORMAT_R8G8B8A8_SINT = 42,
TIF_VK_FORMAT_R8G8B8A8_SRGB = 43,
TIF_VK_FORMAT_B8G8R8A8_UNORM = 44,
TIF_VK_FORMAT_B8G8R8A8_SNORM = 45,
TIF_VK_FORMAT_B8G8R8A8_USCALED = 46,
TIF_VK_FORMAT_B8G8R8A8_SSCALED = 47,
TIF_VK_FORMAT_B8G8R8A8_UINT = 48,
TIF_VK_FORMAT_B8G8R8A8_SINT = 49,
TIF_VK_FORMAT_B8G8R8A8_SRGB = 50,
TIF_VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
TIF_VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
TIF_VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
TIF_VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
TIF_VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
TIF_VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
TIF_VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
TIF_VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
TIF_VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
TIF_VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
TIF_VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
TIF_VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
TIF_VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
TIF_VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
TIF_VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
TIF_VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
TIF_VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
TIF_VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
TIF_VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
TIF_VK_FORMAT_R16_UNORM = 70,
TIF_VK_FORMAT_R16_SNORM = 71,
TIF_VK_FORMAT_R16_USCALED = 72,
TIF_VK_FORMAT_R16_SSCALED = 73,
TIF_VK_FORMAT_R16_UINT = 74,
TIF_VK_FORMAT_R16_SINT = 75,
TIF_VK_FORMAT_R16_SFLOAT = 76,
TIF_VK_FORMAT_R16G16_UNORM = 77,
TIF_VK_FORMAT_R16G16_SNORM = 78,
TIF_VK_FORMAT_R16G16_USCALED = 79,
TIF_VK_FORMAT_R16G16_SSCALED = 80,
TIF_VK_FORMAT_R16G16_UINT = 81,
TIF_VK_FORMAT_R16G16_SINT = 82,
TIF_VK_FORMAT_R16G16_SFLOAT = 83,
TIF_VK_FORMAT_R16G16B16_UNORM = 84,
TIF_VK_FORMAT_R16G16B16_SNORM = 85,
TIF_VK_FORMAT_R16G16B16_USCALED = 86,
TIF_VK_FORMAT_R16G16B16_SSCALED = 87,
TIF_VK_FORMAT_R16G16B16_UINT = 88,
TIF_VK_FORMAT_R16G16B16_SINT = 89,
TIF_VK_FORMAT_R16G16B16_SFLOAT = 90,
TIF_VK_FORMAT_R16G16B16A16_UNORM = 91,
TIF_VK_FORMAT_R16G16B16A16_SNORM = 92,
TIF_VK_FORMAT_R16G16B16A16_USCALED = 93,
TIF_VK_FORMAT_R16G16B16A16_SSCALED = 94,
TIF_VK_FORMAT_R16G16B16A16_UINT = 95,
TIF_VK_FORMAT_R16G16B16A16_SINT = 96,
TIF_VK_FORMAT_R16G16B16A16_SFLOAT = 97,
TIF_VK_FORMAT_R32_UINT = 98,
TIF_VK_FORMAT_R32_SINT = 99,
TIF_VK_FORMAT_R32_SFLOAT = 100,
TIF_VK_FORMAT_R32G32_UINT = 101,
TIF_VK_FORMAT_R32G32_SINT = 102,
TIF_VK_FORMAT_R32G32_SFLOAT = 103,
TIF_VK_FORMAT_R32G32B32_UINT = 104,
TIF_VK_FORMAT_R32G32B32_SINT = 105,
TIF_VK_FORMAT_R32G32B32_SFLOAT = 106,
TIF_VK_FORMAT_R32G32B32A32_UINT = 107,
TIF_VK_FORMAT_R32G32B32A32_SINT = 108,
TIF_VK_FORMAT_R32G32B32A32_SFLOAT = 109,
TIF_VK_FORMAT_R64_UINT = 110,
TIF_VK_FORMAT_R64_SINT = 111,
TIF_VK_FORMAT_R64_SFLOAT = 112,
TIF_VK_FORMAT_R64G64_UINT = 113,
TIF_VK_FORMAT_R64G64_SINT = 114,
TIF_VK_FORMAT_R64G64_SFLOAT = 115,
TIF_VK_FORMAT_R64G64B64_UINT = 116,
TIF_VK_FORMAT_R64G64B64_SINT = 117,
TIF_VK_FORMAT_R64G64B64_SFLOAT = 118,
TIF_VK_FORMAT_R64G64B64A64_UINT = 119,
TIF_VK_FORMAT_R64G64B64A64_SINT = 120,
TIF_VK_FORMAT_R64G64B64A64_SFLOAT = 121,
TIF_VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
TIF_VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
TIF_VK_FORMAT_D16_UNORM = 124,
TIF_VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
TIF_VK_FORMAT_D32_SFLOAT = 126,
TIF_VK_FORMAT_S8_UINT = 127,
TIF_VK_FORMAT_D16_UNORM_S8_UINT = 128,
TIF_VK_FORMAT_D24_UNORM_S8_UINT = 129,
TIF_VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
TIF_VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
TIF_VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
TIF_VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
TIF_VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
TIF_VK_FORMAT_BC2_UNORM_BLOCK = 135,
TIF_VK_FORMAT_BC2_SRGB_BLOCK = 136,
TIF_VK_FORMAT_BC3_UNORM_BLOCK = 137,
TIF_VK_FORMAT_BC3_SRGB_BLOCK = 138,
TIF_VK_FORMAT_BC4_UNORM_BLOCK = 139,
TIF_VK_FORMAT_BC4_SNORM_BLOCK = 140,
TIF_VK_FORMAT_BC5_UNORM_BLOCK = 141,
TIF_VK_FORMAT_BC5_SNORM_BLOCK = 142,
TIF_VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
TIF_VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
TIF_VK_FORMAT_BC7_UNORM_BLOCK = 145,
TIF_VK_FORMAT_BC7_SRGB_BLOCK = 146,
TIF_VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
TIF_VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
TIF_VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
TIF_VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
TIF_VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
TIF_VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
TIF_VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
TIF_VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
TIF_VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
TIF_VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
TIF_VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
TIF_VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
TIF_VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
TIF_VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
TIF_VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
TIF_VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
TIF_VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
TIF_VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
TIF_VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
TIF_VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
TIF_VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
TIF_VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
TIF_VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
TIF_VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
TIF_VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
TIF_VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
TIF_VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
TIF_VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
TIF_VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
TIF_VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
TIF_VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
TIF_VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
TIF_VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
TIF_VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
TIF_VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
TIF_VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
TIF_VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
TIF_VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
TIF_VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000,
TIF_VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001,
TIF_VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002,
TIF_VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003,
TIF_VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004,
TIF_VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005,
TIF_VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006,
TIF_VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007,
TIF_VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008,
TIF_VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,
TIF_VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,
TIF_VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,
TIF_VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,
TIF_VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,
TIF_VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,
TIF_VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,
TIF_VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,
TIF_VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017,
TIF_VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018,
TIF_VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,
TIF_VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,
TIF_VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,
TIF_VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,
TIF_VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,
TIF_VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,
TIF_VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,
TIF_VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,
TIF_VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027,
TIF_VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028,
TIF_VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029,
TIF_VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030,
TIF_VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031,
TIF_VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032,
TIF_VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033,
TIF_VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
TIF_VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
TIF_VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
TIF_VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,
TIF_VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,
TIF_VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
TIF_VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
TIF_VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
} TinyKTX_VkFormat;
#define TINYKTX_MEV(x) TKTX_##x = TIF_VK_FORMAT_##x
typedef enum TinyKtx_Format {
TINYKTX_MEV(UNDEFINED),
TINYKTX_MEV(R4G4_UNORM_PACK8),
TINYKTX_MEV(R4G4B4A4_UNORM_PACK16),
TINYKTX_MEV(B4G4R4A4_UNORM_PACK16),
TINYKTX_MEV(R5G6B5_UNORM_PACK16),
TINYKTX_MEV(B5G6R5_UNORM_PACK16),
TINYKTX_MEV(R5G5B5A1_UNORM_PACK16),
TINYKTX_MEV(B5G5R5A1_UNORM_PACK16),
TINYKTX_MEV(A1R5G5B5_UNORM_PACK16),
TINYKTX_MEV(R8_UNORM),
TINYKTX_MEV(R8_SNORM),
TINYKTX_MEV(R8_UINT),
TINYKTX_MEV(R8_SINT),
TINYKTX_MEV(R8_SRGB),
TINYKTX_MEV(R8G8_UNORM),
TINYKTX_MEV(R8G8_SNORM),
TINYKTX_MEV(R8G8_UINT),
TINYKTX_MEV(R8G8_SINT),
TINYKTX_MEV(R8G8_SRGB),
TINYKTX_MEV(R8G8B8_UNORM),
TINYKTX_MEV(R8G8B8_SNORM),
TINYKTX_MEV(R8G8B8_UINT),
TINYKTX_MEV(R8G8B8_SINT),
TINYKTX_MEV(R8G8B8_SRGB),
TINYKTX_MEV(B8G8R8_UNORM),
TINYKTX_MEV(B8G8R8_SNORM),
TINYKTX_MEV(B8G8R8_UINT),
TINYKTX_MEV(B8G8R8_SINT),
TINYKTX_MEV(B8G8R8_SRGB),
TINYKTX_MEV(R8G8B8A8_UNORM),
TINYKTX_MEV(R8G8B8A8_SNORM),
TINYKTX_MEV(R8G8B8A8_UINT),
TINYKTX_MEV(R8G8B8A8_SINT),
TINYKTX_MEV(R8G8B8A8_SRGB),
TINYKTX_MEV(B8G8R8A8_UNORM),
TINYKTX_MEV(B8G8R8A8_SNORM),
TINYKTX_MEV(B8G8R8A8_UINT),
TINYKTX_MEV(B8G8R8A8_SINT),
TINYKTX_MEV(B8G8R8A8_SRGB),
TINYKTX_MEV(A8B8G8R8_UNORM_PACK32),
TINYKTX_MEV(A8B8G8R8_SNORM_PACK32),
TINYKTX_MEV(A8B8G8R8_UINT_PACK32),
TINYKTX_MEV(A8B8G8R8_SINT_PACK32),
TINYKTX_MEV(A8B8G8R8_SRGB_PACK32),
TINYKTX_MEV(E5B9G9R9_UFLOAT_PACK32),
TINYKTX_MEV(A2R10G10B10_UNORM_PACK32),
TINYKTX_MEV(A2R10G10B10_UINT_PACK32),
TINYKTX_MEV(A2B10G10R10_UNORM_PACK32),
TINYKTX_MEV(A2B10G10R10_UINT_PACK32),
TINYKTX_MEV(B10G11R11_UFLOAT_PACK32),
TINYKTX_MEV(R16_UNORM),
TINYKTX_MEV(R16_SNORM),
TINYKTX_MEV(R16_UINT),
TINYKTX_MEV(R16_SINT),
TINYKTX_MEV(R16_SFLOAT),
TINYKTX_MEV(R16G16_UNORM),
TINYKTX_MEV(R16G16_SNORM),
TINYKTX_MEV(R16G16_UINT),
TINYKTX_MEV(R16G16_SINT),
TINYKTX_MEV(R16G16_SFLOAT),
TINYKTX_MEV(R16G16B16_UNORM),
TINYKTX_MEV(R16G16B16_SNORM),
TINYKTX_MEV(R16G16B16_UINT),
TINYKTX_MEV(R16G16B16_SINT),
TINYKTX_MEV(R16G16B16_SFLOAT),
TINYKTX_MEV(R16G16B16A16_UNORM),
TINYKTX_MEV(R16G16B16A16_SNORM),
TINYKTX_MEV(R16G16B16A16_UINT),
TINYKTX_MEV(R16G16B16A16_SINT),
TINYKTX_MEV(R16G16B16A16_SFLOAT),
TINYKTX_MEV(R32_UINT),
TINYKTX_MEV(R32_SINT),
TINYKTX_MEV(R32_SFLOAT),
TINYKTX_MEV(R32G32_UINT),
TINYKTX_MEV(R32G32_SINT),
TINYKTX_MEV(R32G32_SFLOAT),
TINYKTX_MEV(R32G32B32_UINT),
TINYKTX_MEV(R32G32B32_SINT),
TINYKTX_MEV(R32G32B32_SFLOAT),
TINYKTX_MEV(R32G32B32A32_UINT),
TINYKTX_MEV(R32G32B32A32_SINT),
TINYKTX_MEV(R32G32B32A32_SFLOAT),
TINYKTX_MEV(BC1_RGB_UNORM_BLOCK),
TINYKTX_MEV(BC1_RGB_SRGB_BLOCK),
TINYKTX_MEV(BC1_RGBA_UNORM_BLOCK),
TINYKTX_MEV(BC1_RGBA_SRGB_BLOCK),
TINYKTX_MEV(BC2_UNORM_BLOCK),
TINYKTX_MEV(BC2_SRGB_BLOCK),
TINYKTX_MEV(BC3_UNORM_BLOCK),
TINYKTX_MEV(BC3_SRGB_BLOCK),
TINYKTX_MEV(BC4_UNORM_BLOCK),
TINYKTX_MEV(BC4_SNORM_BLOCK),
TINYKTX_MEV(BC5_UNORM_BLOCK),
TINYKTX_MEV(BC5_SNORM_BLOCK),
TINYKTX_MEV(BC6H_UFLOAT_BLOCK),
TINYKTX_MEV(BC6H_SFLOAT_BLOCK),
TINYKTX_MEV(BC7_UNORM_BLOCK),
TINYKTX_MEV(BC7_SRGB_BLOCK),
TINYKTX_MEV(ETC2_R8G8B8_UNORM_BLOCK),
TINYKTX_MEV(ETC2_R8G8B8A1_UNORM_BLOCK),
TINYKTX_MEV(ETC2_R8G8B8A8_UNORM_BLOCK),
TINYKTX_MEV(ETC2_R8G8B8_SRGB_BLOCK),
TINYKTX_MEV(ETC2_R8G8B8A1_SRGB_BLOCK),
TINYKTX_MEV(ETC2_R8G8B8A8_SRGB_BLOCK),
TINYKTX_MEV(EAC_R11_UNORM_BLOCK),
TINYKTX_MEV(EAC_R11G11_UNORM_BLOCK),
TINYKTX_MEV(EAC_R11_SNORM_BLOCK),
TINYKTX_MEV(EAC_R11G11_SNORM_BLOCK),
TKTX_PVR_2BPP_BLOCK = TIF_VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG,
TKTX_PVR_2BPPA_BLOCK = TIF_VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG,
TKTX_PVR_4BPP_BLOCK = TIF_VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG,
TKTX_PVR_4BPPA_BLOCK = TIF_VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG,
TKTX_PVR_2BPP_SRGB_BLOCK = TIF_VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG,
TKTX_PVR_2BPPA_SRGB_BLOCK = TIF_VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG,
TKTX_PVR_4BPP_SRGB_BLOCK = TIF_VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG,
TKTX_PVR_4BPPA_SRGB_BLOCK = TIF_VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG,
TINYKTX_MEV(ASTC_4x4_UNORM_BLOCK),
TINYKTX_MEV(ASTC_4x4_SRGB_BLOCK),
TINYKTX_MEV(ASTC_5x4_UNORM_BLOCK),
TINYKTX_MEV(ASTC_5x4_SRGB_BLOCK),
TINYKTX_MEV(ASTC_5x5_UNORM_BLOCK),
TINYKTX_MEV(ASTC_5x5_SRGB_BLOCK),
TINYKTX_MEV(ASTC_6x5_UNORM_BLOCK),
TINYKTX_MEV(ASTC_6x5_SRGB_BLOCK),
TINYKTX_MEV(ASTC_6x6_UNORM_BLOCK),
TINYKTX_MEV(ASTC_6x6_SRGB_BLOCK),
TINYKTX_MEV(ASTC_8x5_UNORM_BLOCK),
TINYKTX_MEV(ASTC_8x5_SRGB_BLOCK),
TINYKTX_MEV(ASTC_8x6_UNORM_BLOCK),
TINYKTX_MEV(ASTC_8x6_SRGB_BLOCK),
TINYKTX_MEV(ASTC_8x8_UNORM_BLOCK),
TINYKTX_MEV(ASTC_8x8_SRGB_BLOCK),
TINYKTX_MEV(ASTC_10x5_UNORM_BLOCK),
TINYKTX_MEV(ASTC_10x5_SRGB_BLOCK),
TINYKTX_MEV(ASTC_10x6_UNORM_BLOCK),
TINYKTX_MEV(ASTC_10x6_SRGB_BLOCK),
TINYKTX_MEV(ASTC_10x8_UNORM_BLOCK),
TINYKTX_MEV(ASTC_10x8_SRGB_BLOCK),
TINYKTX_MEV(ASTC_10x10_UNORM_BLOCK),
TINYKTX_MEV(ASTC_10x10_SRGB_BLOCK),
TINYKTX_MEV(ASTC_12x10_UNORM_BLOCK),
TINYKTX_MEV(ASTC_12x10_SRGB_BLOCK),
TINYKTX_MEV(ASTC_12x12_UNORM_BLOCK),
TINYKTX_MEV(ASTC_12x12_SRGB_BLOCK),
} TinyKtx_Format;
#undef TINYKTX_MEV
#define TINYKTX_DEFINED
#endif
TinyKtx_Format TinyKtx2_GetFormat(TinyKtx2_ContextHandle handle);
bool TinyKtx2_WriteImage(TinyKtx2_WriteCallbacks const *callbacks,
void *user,
uint32_t width,
uint32_t height,
uint32_t depth,
uint32_t slices,
uint32_t mipmaplevels,
TinyKtx_Format format,
bool cubemap,
uint32_t const *mipmapsizes,
void const **mipmaps);
#ifdef TINYKTX2_IMPLEMENTATION
typedef struct TinyKtx2_KeyValuePair {
uint32_t size;
} TinyKtx2_KeyValuePair; // followed by at least size bytes (aligned to 4)
typedef struct TinyKtx2_HeaderV2 {
uint8_t identifier[12];
TinyKtx_Format vkFormat;
uint32_t pixelWidth;
uint32_t pixelHeight;
uint32_t pixelDepth;
uint32_t arrayElementCount;
uint32_t faceCount;
uint32_t levelCount;
uint32_t supercompressionScheme;
uint32_t dfdByteOffset;
uint32_t dfdByteLength;
uint32_t kvdByteOffset;
uint32_t kvdByteLength;
uint64_t sgdByteOffset;
uint64_t sgdByteLength;
} TinyKtx2_Header;
typedef struct TinyKtx2_Level {
uint64_t byteOffset;
uint64_t byteLength;
uint64_t uncompressedByteLength;
} TinyKtx2_Level;
typedef enum TinyKtx2_SuperCompressionScheme {
TKTX2_SUPERCOMPRESSION_NONE = 0,
TKTX2_SUPERCOMPRESSION_CRN = 1,
TKTX2_SUPERCOMPRESSION_ZLIB = 2,
TKTX2_SUPERCOMPRESSION_ZSTD = 3,
} TinyKtx2_SuperCompressionScheme;
typedef struct TinyKtx2_Context {
TinyKtx2_Callbacks callbacks;
void *user;
uint64_t headerPos;
uint64_t firstImagePos;
TinyKtx2_Header header;
TinyKtx2_KeyValuePair const *keyData;
bool headerValid;
bool sameEndian;
void* sgdData;
TinyKtx2_Level levels[TINYKTX2_MAX_MIPMAPLEVELS];
uint8_t const *mipmaps[TINYKTX2_MAX_MIPMAPLEVELS];
} TinyKtx2_Context;
static uint8_t TinyKtx2_fileIdentifier[12] = {
0xAB, 0x4B, 0x54, 0x58, 0x20, 0x32, 0x30, 0xBB, 0x0D, 0x0A, 0x1A, 0x0A
};
static void TinyKtx2_NullErrorFunc(void *user, char const *msg) {}
TinyKtx2_ContextHandle TinyKtx2_CreateContext(TinyKtx2_Callbacks const *callbacks, void *user) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) callbacks->alloc(user, sizeof(TinyKtx2_Context));
if (ctx == NULL)
return NULL;
memset(ctx, 0, sizeof(TinyKtx2_Context));
memcpy(&ctx->callbacks, callbacks, sizeof(TinyKtx2_Callbacks));
ctx->user = user;
if (ctx->callbacks.error == NULL) {
ctx->callbacks.error = &TinyKtx2_NullErrorFunc;
}
if (ctx->callbacks.read == NULL) {
ctx->callbacks.error(user, "TinyKtx must have read callback");
return NULL;
}
if (ctx->callbacks.alloc == NULL) {
ctx->callbacks.error(user, "TinyKtx must have alloc callback");
return NULL;
}
if (ctx->callbacks.free == NULL) {
ctx->callbacks.error(user, "TinyKtx must have free callback");
return NULL;
}
if (ctx->callbacks.seek == NULL) {
ctx->callbacks.error(user, "TinyKtx must have seek callback");
return NULL;
}
if (ctx->callbacks.tell == NULL) {
ctx->callbacks.error(user, "TinyKtx must have tell callback");
return NULL;
}
TinyKtx2_Reset(ctx);
return ctx;
}
void TinyKtx2_DestroyContext(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return;
TinyKtx2_Reset(handle);
ctx->callbacks.free(ctx->user, ctx);
}
void TinyKtx2_Reset(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return;
// backup user provided callbacks and data
TinyKtx2_Callbacks callbacks;
memcpy(&callbacks, &ctx->callbacks, sizeof(TinyKtx2_Callbacks));
void *user = ctx->user;
// free any super compression global data we've allocated
if (ctx->sgdData != NULL) {
callbacks.free(user, (void *) ctx->sgdData);
}
// free memory of sub data
if (ctx->keyData != NULL) {
callbacks.free(user, (void *) ctx->keyData);
}
for (int i = 0; i < TINYKTX2_MAX_MIPMAPLEVELS; ++i) {
if (ctx->mipmaps[i] != NULL) {
callbacks.free(user, (void *) ctx->mipmaps[i]);
}
}
// reset to default state
memset(ctx, 0, sizeof(TinyKtx2_Context));
memcpy(&ctx->callbacks, &callbacks, sizeof(TinyKtx2_Callbacks));
ctx->user = user;
}
bool TinyKtx2_ReadHeader(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
ctx->headerPos = ctx->callbacks.tell(ctx->user);
ctx->callbacks.read(ctx->user, &ctx->header, sizeof(TinyKtx2_Header));
if (memcmp(&ctx->header.identifier, TinyKtx2_fileIdentifier, 12) != 0) {
ctx->callbacks.error(ctx->user, "Not a KTX V2 file or corrupted as identified isn't valid");
return false;
}
if (ctx->header.faceCount != 1 && ctx->header.faceCount != 6) {
ctx->callbacks.error(ctx->user, "no. of Faces must be 1 or 6");
return false;
}
// cap level to max
if(ctx->header.levelCount >= TINYKTX2_MAX_MIPMAPLEVELS) {
ctx->header.levelCount = TINYKTX2_MAX_MIPMAPLEVELS;
}
// a level count of 0 means the loader is expected to generate mipmaps from the 1 level stored
uint32_t const levelCount = ctx->header.levelCount ? ctx->header.levelCount : 1;
ctx->callbacks.read(ctx->user, &ctx->levels, sizeof(TinyKtx2_Level) * levelCount); // read the level index, one TinyKtx2_Level per mip
if(ctx->header.sgdByteLength > 0) {
ctx->sgdData = ctx->callbacks.alloc(ctx->user, ctx->header.sgdByteLength);
ctx->callbacks.seek(ctx->user, ctx->header.sgdByteOffset);
ctx->callbacks.read(ctx->user, ctx->sgdData, ctx->header.sgdByteLength);
}
ctx->headerValid = true; // the accessor functions below check this flag
return true;
}
bool TinyKtx2_GetValue(TinyKtx2_ContextHandle handle, char const *key, void const **value) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
if (ctx->keyData == NULL) {
ctx->callbacks.error(ctx->user, "No key value data in this KTX");
return false;
}
TinyKtx2_KeyValuePair const *curKey = ctx->keyData;
while (((uint8_t *) curKey - (uint8_t *) ctx->keyData) < ctx->header.kvdByteLength) {
char const *kvp = (char const *) curKey;
if (strcmp(kvp, key) == 0) {
size_t sl = strlen(kvp);
*value = (void const *) (kvp + sl);
return true;
}
curKey = curKey + ((curKey->size + 3u) & ~3u);
}
return false;
}
bool TinyKtx2_Is1D(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
return (ctx->header.pixelHeight <= 1) && (ctx->header.pixelDepth <= 1 );
}
bool TinyKtx2_Is2D(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
return (ctx->header.pixelHeight > 1 && ctx->header.pixelDepth <= 1);
}
bool TinyKtx2_Is3D(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
return (ctx->header.pixelHeight > 1 && ctx->header.pixelDepth > 1);
}
bool TinyKtx2_IsCubemap(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
return (ctx->header.faceCount == 6);
}
bool TinyKtx2_IsArray(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
return (ctx->header.arrayElementCount > 1);
}
bool TinyKtx2_Dimensions(TinyKtx2_ContextHandle handle,
uint32_t *width,
uint32_t *height,
uint32_t *depth,
uint32_t *slices) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
if (width)
*width = ctx->header.pixelWidth;
if (height)
*height = ctx->header.pixelHeight;
if (depth)
*depth = ctx->header.pixelDepth;
if (slices)
*slices = ctx->header.arrayElementCount;
return true;
}
uint32_t TinyKtx2_Width(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return 0;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return 0;
}
return ctx->header.pixelWidth;
}
uint32_t TinyKtx2_Height(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return 0;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return 0;
}
return ctx->header.pixelHeight;
}
uint32_t TinyKtx2_Depth(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return 0;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return 0;
}
return ctx->header.pixelDepth;
}
uint32_t TinyKtx2_ArraySlices(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return 0;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return 0;
}
return ctx->header.arrayElementCount;
}
uint32_t TinyKtx2_NumberOfMipmaps(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return 0;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return 0;
}
return ctx->header.levelCount ? ctx->header.levelCount : 1;
}
bool TinyKtx2_NeedsGenerationOfMipmaps(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return false;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return false;
}
return ctx->header.levelCount == 0;
}
uint32_t TinyKtx2_ImageSize(TinyKtx2_ContextHandle handle, uint32_t mipmaplevel) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return 0;
if (mipmaplevel >= ctx->header.levelCount) {
ctx->callbacks.error(ctx->user, "Invalid mipmap level");
return 0;
}
if (mipmaplevel >= TINYKTX2_MAX_MIPMAPLEVELS) {
ctx->callbacks.error(ctx->user, "Invalid mipmap level");
return 0;
}
return (uint32_t) ctx->levels[mipmaplevel].uncompressedByteLength;
}
void const *TinyKtx2_ImageRawData(TinyKtx2_ContextHandle handle, uint32_t mipmaplevel) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return NULL;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return NULL;
}
if (mipmaplevel >= ctx->header.levelCount) {
ctx->callbacks.error(ctx->user, "Invalid mipmap level");
return NULL;
}
if (mipmaplevel >= TINYKTX2_MAX_MIPMAPLEVELS) {
ctx->callbacks.error(ctx->user, "Invalid mipmap level");
return NULL;
}
if (ctx->mipmaps[mipmaplevel] != NULL)
return ctx->mipmaps[mipmaplevel];
TinyKtx2_Level* lvl = &ctx->levels[mipmaplevel];
if (lvl->byteLength == 0 || lvl->uncompressedByteLength == 0)
return NULL;
// allocate decompressed buffer
ctx->mipmaps[mipmaplevel] = (uint8_t const*) ctx->callbacks.alloc(ctx->user, lvl->uncompressedByteLength);
if (ctx->mipmaps[mipmaplevel] == NULL)
return NULL;
// handle no super compression first (saves a buffer allocation)
if(ctx->header.supercompressionScheme == TKTX2_SUPERCOMPRESSION_NONE) {
if(lvl->uncompressedByteLength != lvl->byteLength) {
ctx->callbacks.error(ctx->user, "mipmap image data has no super compression but compressed and uncompressed data sizes are different");
ctx->callbacks.free(ctx->user, (void*)ctx->mipmaps[mipmaplevel]);
return NULL;
}
ctx->callbacks.seek(ctx->user, lvl->byteOffset);
ctx->callbacks.read(ctx->user, (void *) ctx->mipmaps[mipmaplevel], lvl->byteLength);
return ctx->mipmaps[mipmaplevel];
}
// this data is super compressed, we need to see if the user provided a decompressor and if so use it
TinyKtx2_SuperDecompress decompressor = NULL;
// see if the user provided the decompressor we need
for(size_t i = 0; i < ctx->callbacks.numSuperDecompressors;++i) {
if(ctx->callbacks.superDecompressors[i].superId == ctx->header.supercompressionScheme) {
decompressor = ctx->callbacks.superDecompressors[i].decompressor;
}
}
if(decompressor == NULL) {
ctx->callbacks.error(ctx->user, "user did not provide a decompressor for use with this type of super decompressor");
ctx->callbacks.free(ctx->user, (void*)ctx->mipmaps[mipmaplevel]);
return NULL;
}
// read the compressed data into its own buffer (freed once decompression has occurred)
uint8_t const* compressedBuffer = (uint8_t const*)ctx->callbacks.alloc(ctx->user, lvl->byteLength);
if(compressedBuffer == NULL) {
ctx->callbacks.free(ctx->user, (void*)ctx->mipmaps[mipmaplevel]);
return NULL;
}
ctx->callbacks.seek(ctx->user, lvl->byteOffset);
ctx->callbacks.read(ctx->user, (void *) compressedBuffer, lvl->byteLength);
bool okay = decompressor(ctx->user, ctx->sgdData, compressedBuffer, lvl->byteLength, ctx->mipmaps[mipmaplevel], lvl->uncompressedByteLength);
if(!okay) {
ctx->callbacks.error(ctx->user, "user decompressor failed");
ctx->callbacks.free(ctx->user, (void *) compressedBuffer);
ctx->callbacks.free(ctx->user, (void *) ctx->mipmaps[mipmaplevel]);
return NULL;
}
ctx->callbacks.free(ctx->user, (void *) compressedBuffer);
return ctx->mipmaps[mipmaplevel];
}
TinyKtx_Format TinyKtx2_GetFormat(TinyKtx2_ContextHandle handle) {
TinyKtx2_Context *ctx = (TinyKtx2_Context *) handle;
if (ctx == NULL)
return TKTX_UNDEFINED;
if (ctx->headerValid == false) {
ctx->callbacks.error(ctx->user, "Header data hasn't been read yet or its invalid");
return TKTX_UNDEFINED;
}
// TODO handle DFD only described formats (VK_FORMAT_UNDEFINED)
return (TinyKtx_Format)ctx->header.vkFormat;
}
static uint32_t TinyKtx2_MipMapReduce(uint32_t value, uint32_t mipmaplevel) {
// handle 0 being passed in
if(value <= 1) return 1;
// there are better ways of doing this (log2 etc.) but this doesn't require any
// dependencies and isn't used enough to matter imho
for (uint32_t i = 0u; i < mipmaplevel;++i) {
if(value <= 1) return 1;
value = value / 2;
}
return value;
}
bool TinyKtx2_WriteImage(TinyKtx2_WriteCallbacks const *callbacks,
void *user,
uint32_t width,
uint32_t height,
uint32_t depth,
uint32_t slices,
uint32_t mipmaplevels,
TinyKtx_Format format,
bool cubemap,
uint32_t const *mipmapsizes,
void const **mipmaps) {
// TODO: writing KTX v2 files is not implemented yet
ASSERT(false);
return false; // avoid falling off the end of a bool-returning function
}
#endif
#ifdef __cplusplus
};
#endif
#endif // end header
/*
MIT License
Copyright (c) 2019 DeanoC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
<file_sep>#define TINYKTX_IMPLEMENTATION
#include "tiny_imageformat/tinyimageformat_base.h"
#include "tiny_ktx/tinyktx.h"
| 48a833a99f001aa6534733722f72b0ae431f8e25 | [
"Markdown",
"C++",
"C"
] | 5 | Markdown | ux3d/tiny_ktx | 579bae0728eb02536acea2228fb9acf6356e15d5 | aabbd8ac8e0b2a43b3c087cc5d951ad95cec089e |
refs/heads/main | <repo_name>kassmi1u/Pioneer_Simulation<file_sep>/code_Evitement_d_obstacle.py
import vrep
import math
import time
import numpy as np
def to_rad(deg):
return 2*math.pi*deg/360
def to_deg(rad):
return rad*360/(2*math.pi)
# simulation config
ip = '127.0.0.1'
port = 19997
scene = './pioneer.ttm'
position_init = [0,0,to_rad(0)]
print ('Program started')
vrep.simxFinish(-1) # just in case, close all opened connections
client_id=vrep.simxStart(ip,port,True,True,5000,5) # Connect to V-REP
if client_id!=-1:
print ('Connected to remote API server on %s:%s' % (ip, port))
res = vrep.simxLoadScene(client_id, scene, 1, vrep.simx_opmode_oneshot_wait)
res, pioneer = vrep.simxGetObjectHandle(client_id, 'Pioneer_p3dx', vrep.simx_opmode_oneshot_wait)
res, left_motor = vrep.simxGetObjectHandle(client_id, 'Pioneer_p3dx_leftMotor', vrep.simx_opmode_oneshot_wait)
res, right_motor = vrep.simxGetObjectHandle(client_id, 'Pioneer_p3dx_rightMotor', vrep.simx_opmode_oneshot_wait)
# For Sensors
sensor_handles=np.zeros(16)
sensors_handles=np.zeros(16)
detectStatus = np.zeros(16)
# Reading data for sensors
for i in range(1,17) :
res , sensor_handle = vrep.simxGetObjectHandle(client_id, "Pioneer_p3dx_ultrasonicSensor" + str(i), vrep.simx_opmode_blocking)
sensor_handles[i-1] = sensor_handle
res, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = vrep.simxReadProximitySensor(client_id, sensor_handle, vrep.simx_opmode_streaming)
#intial values of Robot Speed
v0 = 1.5
v_l = 0
v_r = 0
# Braitenberg Algorithm Parameters
maxDetectionRadius = 0.5
minSafetyDist = 0.2
braitenbergL = np.array([-0.2,-0.4,-0.6,-0.8,-1,-1.2,-1.4,-1.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
braitenbergR = np.array([-1.6,-1.4,-1.2,-1,-0.8,-0.6,-0.4,-0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
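# Each entry of detectStatus is a normalized proximity (1.0 at the safety
# distance, 0.0 at the edge of the detection radius). The braitenberg weight
# vectors turn those readings into wheel slow-downs; an obstacle on one side
# slows the opposite wheel more, so the robot steers away from it.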
simStatusCheck = vrep.simxStartSimulation(client_id, vrep.simx_opmode_oneshot)
continue_running = True
while(continue_running):
for i in range(1,17) :
res, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = vrep.simxReadProximitySensor(client_id, int(sensor_handles[i-1]), vrep.simx_opmode_buffer)
distToObject = math.sqrt(math.pow(detectedPoint[0], 2) + math.pow(detectedPoint[1], 2) + math.pow(detectedPoint[2], 2)) # Calculate distance to obstacle relative to each sensor
if (detectionState == True) and (distToObject < maxDetectionRadius):
if (distToObject < minSafetyDist):
distToObject = minSafetyDist
detectStatus[i-1] = 1-((distToObject - minSafetyDist)/(maxDetectionRadius - minSafetyDist))
else:
detectStatus[i-1] = 0
v_l = v0
v_r = v0
for i in range(1,17):
v_l = v_l + braitenbergL[i-1] * detectStatus[i-1]
v_r = v_r + braitenbergR[i-1] * detectStatus[i-1]
res = vrep.simxSetJointTargetVelocity(client_id, left_motor, v_l, vrep.simx_opmode_oneshot)
res = vrep.simxSetJointTargetVelocity(client_id, right_motor, v_r, vrep.simx_opmode_oneshot)
#Terminate
vrep.simxStopSimulation(client_id, vrep.simx_opmode_oneshot_wait)
vrep.simxFinish(client_id)
else:
print('Unable to connect to %s:%s' % (ip, port))
<file_sep>/code_Navigation_vers_but_+_Evitement_d_obtacle.py
import vrep
import math
import time
import numpy as np
def to_rad(deg):
return 2*math.pi*deg/360
def to_deg(rad):
return rad*360/(2*math.pi)
# simulation config
ip = '127.0.0.1'
port = 19997
scene = './pioneer.ttm'
position_init = [0,0,to_rad(0)]
position_init1 = [3,3,to_rad(3)]
print ('Program started')
vrep.simxFinish(-1) # just in case, close all opened connections
client_id=vrep.simxStart(ip,port,True,True,5000,5) # Connect to V-REP
if client_id!=-1:
print ('Connected to remote API server on %s:%s' % (ip, port))
res = vrep.simxLoadScene(client_id, scene, 1, vrep.simx_opmode_oneshot_wait)
res, pioneer = vrep.simxGetObjectHandle(client_id, 'Pioneer_p3dx', vrep.simx_opmode_oneshot_wait)
res, left_motor = vrep.simxGetObjectHandle(client_id, 'Pioneer_p3dx_leftMotor', vrep.simx_opmode_oneshot_wait)
res, right_motor = vrep.simxGetObjectHandle(client_id, 'Pioneer_p3dx_rightMotor', vrep.simx_opmode_oneshot_wait)
res, wall = vrep.simxGetObjectHandle(client_id, '20cmHighWall50cm', vrep.simx_opmode_oneshot_wait)
# For Sensors
tmp=np.zeros(16) #For robot position
tmp2=np.zeros(16) # For Goal position
sensor_handles=np.zeros(16)
detectStatus = np.zeros(16)
# Reading data for sensors
for i in range(1,17) :
res , sensor_handle = vrep.simxGetObjectHandle(client_id, "Pioneer_p3dx_ultrasonicSensor" + str(i), vrep.simx_opmode_blocking)
sensor_handles[i-1] = sensor_handle
res, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = vrep.simxReadProximitySensor(client_id, sensor_handle, vrep.simx_opmode_streaming)
#intial values of Robot Speed
v0 = 1 # Linear Speed of the robot
w0 = 0 # Angular Speed of the robot
k1=0.1
k2=0.1
# Braitenberg Algorithm Parameters
maxDetectionRadius = 0.3
SafetyDist = 0.2
braitenbergL = np.array([-0.2,-0.4,-0.6,-0.8,-1,-1.2,-1.4,-1.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
braitenbergR = np.array([-1.6,-1.4,-1.2,-1,-0.8,-0.6,-0.4,-0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
simStatusCheck = vrep.simxStartSimulation(client_id, vrep.simx_opmode_oneshot)
# Other parameters
state = 0
Robot_position = position_init
Goal_position = position_init1
thresold_distance = 0.5
# Goal position
res, tmp2 = vrep.simxGetObjectPosition(client_id,wall,-1, vrep.simx_opmode_oneshot_wait)
Goal_position[0]= tmp2[0]
Goal_position[1]= tmp2[1]
# Main programm
continue_running = True
while(continue_running):
# Detecting obstacles
for i in range(1,17) :
res, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = vrep.simxReadProximitySensor(client_id, int(sensor_handles[i-1]), vrep.simx_opmode_buffer)
distObject = math.sqrt(math.pow(detectedPoint[0], 2) + math.pow(detectedPoint[1], 2) + math.pow(detectedPoint[2], 2)) # Calculate distance to obstacle relative to each sensor
if (detectionState == True) and (distObject < maxDetectionRadius):
if (distObject < SafetyDist):
distObject = SafetyDist
detectStatus[i-1] = 1-((distObject - SafetyDist)/(maxDetectionRadius - SafetyDist))
state = 1
else:
detectStatus[i-1] = 0
# Robot position
res, tmp = vrep.simxGetObjectPosition(client_id,pioneer,-1, vrep.simx_opmode_oneshot_wait)
Robot_position[0]= tmp[0] #X_r
Robot_position[1]= tmp[1] #Y_r
res, tmp = vrep.simxGetObjectOrientation(client_id, pioneer, -1, vrep.simx_opmode_oneshot_wait)
Robot_position[2] = tmp[2] # in radians
#Distance to Goal
d = math.sqrt(math.pow(Goal_position[0] - Robot_position[0],2) + math.pow(Goal_position[1]- Robot_position[1],2))
Goal_teta = math.atan2(Goal_position[1] - Robot_position[1], Goal_position[0] - Robot_position[0]) # atan2 handles all quadrants and a zero x-difference
delta_teta = Robot_position[2] - Goal_teta
w0 = -delta_teta
# Wheel speeds if no obstacle is near the robot
v_left = ((v0/(2*k1) - w0/(2*k2)))
v_right = ((v0/(2*k2)) + w0/(2*k1))
# Wheels Velocity if there is an obstacle near the robot
if state == 1 :
# adjust wheel speeds
v_left = v0
v_right = v0
# braitenberg vehicle
for i in range(1,17):
v_left = v_left + braitenbergL[i-1] * detectStatus[i-1]
v_right = v_right + braitenbergR[i-1] * detectStatus[i-1]
state = 0
res = vrep.simxSetJointTargetVelocity(client_id, left_motor, v_left, vrep.simx_opmode_oneshot)
res = vrep.simxSetJointTargetVelocity(client_id, right_motor, v_right, vrep.simx_opmode_oneshot)
# cancel the speed and stop the simulation if the robot has reached the objective
if (d <= threshold_distance):
res = vrep.simxSetJointTargetVelocity(client_id, left_motor, 0, vrep.simx_opmode_oneshot)
res = vrep.simxSetJointTargetVelocity(client_id, right_motor, 0, vrep.simx_opmode_oneshot)
print("Robot reached the goal ")
continue_running = False
#Terminate
vrep.simxStopSimulation(client_id, vrep.simx_opmode_oneshot_wait)
vrep.simxFinish(client_id)
else:
print('Unable to connect to %s:%s' % (ip, port))
| aea0c32d0d21f9ae84502b883789134a295bd21e | [
"Python"
] | 2 | Python | kassmi1u/Pioneer_Simulation | 5affbfd2f612db2132d5aa302017e5f2fd49d876 | b55c1340aa3ad284474915b8897a5122103a0715 |
refs/heads/master | <file_sep>My name is <NAME>
I like to watch and play football and basketball.
My favorite team is the Portland Trail Blazers
I also like to code in my spare time.
My favorite food is bbq. My favorite bbq meats are
brisket, pulled pork, sausage, ribs in that order.
My favorite activities are skiing, hiking, rafting,
and jogging. I love the outdoors.
Programming Background
I have taken courses in C, C++, Assembly Language using MASM,
Data Structures, and Usability Engineering
I am proficient in C, C++, and MASM. My favorite project so far
has been creating my own spreadsheet program in C++
| c5b96a16fa3bab8452ef40eab81ef02413747dac | [
"Markdown"
] | 1 | Markdown | gadirajv/assignment1 | fb98a87ad31adeca1d058083cceb26840d794a53 | 8fe30155a0b4afdce2f613917680db9329d3838c |
refs/heads/main | <repo_name>evofan/test_react_easy_sample<file_sep>/chap5/money_book/src/index_5-2-6.js
import React from 'react';
import ReactDOM from 'react-dom';
import PropTypes from 'prop-types';// ■コンポーネントに渡す引数をチェックしてくれるモジュール
import './index.css';
const MoneyBook = () => {
const books = [
{ date: "1/1", item: "お年玉", amount: 10000 },
{ date: "1/3", item: "ケーキ", amount: -500 },
{ date: "2/1", item: "小遣い", amount: 3000 },
{ date: "2/5", item: "漫画", amount: -600 }
]
return (
<div>
<h1>小遣い帳</h1>
<table className="book">
<thead>
<tr><th>日付</th><th>項目</th><th>入金</th><th>出金</th></tr>
</thead>
<tbody>
{books.map((book, idx) => {
return <MoneyBookItem book={book} key={idx + book.date + book.item} />
})
}
</tbody>
</table>
</div>
)
};
const MoneyBookItem = (props) => { // ■MoneyBookItemコンポーネントの定義、パラメーターをpropsで受け取る
const { date, item, amount } = props.book;
// ■↓と同じ
// const data = props.book.data;
// const item = props.book.item;
// const amount = props.book.amount;
/*
if (amount > 0) { // ■入金と出金で表示する列が違うので、条件分岐で表示するJSXを分ける
return (
<tr><td>{date}</td><td>{item}</td><td>{amount}</td><td></td></tr>
)
} else {
return (
<tr><td>{date}</td><td>{item}</td><td></td><td>{-amount}</td></tr>
)
}
*/
return (
<tr><td>{date}</td>
<td>{item}</td>
<td>{amount >= 0 ? amount : null}</td>
<td>{amount < 0 ? -amount : null}</td>
</tr>
)
};
MoneyBookItem.propTypes = { // ■MoneyBookItemコンポーネントに渡すパラメーターの型チェック、コンパイル時のエラー検出の為入れる事を推奨
book: PropTypes.object.isRequired // ■object型のbookが渡ってくると定義、isRequiredが付いてるのでbookパラメーターが渡って来ない場合はconsole上にエラー表示
};
ReactDOM.render(
<MoneyBook />,
document.getElementById('root')
);
<file_sep>/chap2/npm_start.bat
@start chrome --incognito "http://localhost:8080/index.html"
npm start<file_sep>/chap8/jyanken/test/test.js
import assert from "assert";
it("1+1は2である", () => { // ■it()はテストを行う関数、it(テストの説明文字列、テストコードの無名関数)
// ■assert.equal(テスト売る式・値, 正解の式・値)
assert.equal(1 + 1, 2); // ok
// assert.equal(1 + 1, 3); // err
});
<file_sep>/README.md
# Test React.js using the introductory book.
<img src="https://evofan.github.io/test_react_easy_sample/chap3/pic_npm_list.jpg" width="50%">
<img src="https://evofan.github.io/test_react_easy_sample/chap5/pic_screenshot_5-2-2.jpg" width="25%">
<img src="https://evofan.github.io/test_react_easy_sample/chap6/pic_error2.jpg" width="50%">
<img src="https://evofan.github.io/test_react_easy_sample/chap6/pic_jyanken.jpg" width="25%">
<img src="https://evofan.github.io/test_react_easy_sample/chap6/pic_controlled.jpg" width="25%">
<img src="https://evofan.github.io/test_react_easy_sample/chap6/pic_fig1.png" width="50%">
<img src="https://evofan.github.io/test_react_easy_sample/chap7/pic_jyanken_material_ui.png" width="25%">
<img src="https://evofan.github.io/test_react_easy_sample/chap7/pic_err1.jpg" width="50%">7-2 Jyanken Router sample
<img src="https://evofan.github.io/test_react_easy_sample/chap7/pic_err2.jpg" width="50%">7-3 Weather API sample
<img src="https://evofan.github.io/test_react_easy_sample/chap8/pic_assert.jpg" width="50%">8-1 assert test
<img src="https://evofan.github.io/test_react_easy_sample/chap8/pic_err3.jpg" width="50%">
<img src="https://evofan.github.io/test_react_easy_sample/chap8/pic_err4.jpg" width="50%">
reference
**作りながら学ぶ React入門**
[https://www.amazon.co.jp/dp/479805075X/](https://www.amazon.co.jp/dp/479805075X/)
**yuumi3/react_book**
[https://github.com/yuumi3/react_book](https://github.com/yuumi3/react_book)
**React Developer Tools - Chrome ウェブストア**
[https://chrome.google.com/webstore/detail/react-developer-tools/fmkadmapgofadopljbjfkapdkoienihi/related](https://chrome.google.com/webstore/detail/react-developer-tools/fmkadmapgofadopljbjfkapdkoienihi/related)
**React lifecycle methods diagram**
[https://projects.wojtekmaj.pl/react-lifecycle-methods-diagram/](https://projects.wojtekmaj.pl/react-lifecycle-methods-diagram/)
**brillout/awesome-react-components**
[https://github.com/brillout/awesome-react-components](https://github.com/brillout/awesome-react-components)
**Material-UI: A popular React UI framework**
[https://material-ui.com/ja/](https://material-ui.com/ja/)
**REACT ROUTER**
[https://reactrouter.com/](https://reactrouter.com/)
**DeprecationWarning: "--compilers" will be removed in a future version of Mocha; #42**
[https://github.com/power-assert-js/espower-typescript/issues/42](https://github.com/power-assert-js/espower-typescript/issues/42)
**Assert | Node.js v15.4.0 Documentation**
[https://nodejs.org/api/assert.html](https://nodejs.org/api/assert.html)
deepEqual()や、strictEqual等
<file_sep>/chap5/money_book/src/index_5-2-2.js
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
const MoneyBook = () => {
return(
<div>
<h1>小遣い帳</h1>
<table className="book">
<thead>
<tr><th>日付</th><th>項目</th><th>入金</th><th>出金</th></tr>
</thead>
<tbody>
<tr><td>1/1</td><td>お年玉</td><td>10000</td><td></td></tr>
<tr><td>1/3</td><td>ケーキ</td><td></td><td>500</td></tr>
<tr><td>2/1</td><td>小遣い</td><td>3000</td><td></td></tr>
<tr><td>2/5</td><td>漫画</td><td></td><td>600</td></tr>
</tbody>
</table>
</div>
)
};
ReactDOM.render(
<MoneyBook />,
document.getElementById('root')
);
<file_sep>/chap5/money_book/src/index_5-2-3.js
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
const MoneyBook = () => {
const books = [
{ data: "1/1", item: "お年玉", amount: 10000 },
{ data: "1/3", item: "ケーキ", amount: -500 },
{ data: "2/1", item: "小遣い", amount: 3000 },
{ data: "2/5", item: "漫画", amount: -600 },
]
return (
<div>
<h1>小遣い帳</h1>
<table className="book">
<thead>
<tr><th>日付</th><th>項目</th><th>入金</th><th>出金</th></tr>
</thead>
<tbody>
<tr><td>{books[0].data}</td><td>{books[0].item}</td><td>{books[0].amount}</td><td></td></tr>
<tr><td>{books[1].data}</td><td>{books[1].item}</td><td></td><td>{-books[1].amount}</td></tr>
<tr><td>{books[2].data}</td><td>{books[2].item}</td><td>{books[2].amount}</td><td></td></tr>
<tr><td>{books[3].data}</td><td>{books[3].item}</td><td></td><td>{-books[3].amount}</td></tr>
</tbody>
</table>
</div>
)
};
ReactDOM.render(
<MoneyBook />,
document.getElementById('root')
);
<file_sep>/chap8/jyanken/test/jyanken.js
import assert from "assert";
import Jyanken from "../src/Jyanken";
describe("Jyanken", () => {
const jyanken = new Jyanken();
describe("勝敗の判定が正しいか?", () => {
describe("コンピューターがグーの場合", () => {
it("人間がグーの場合は引き分け", () => {
jyanken.pon(0, 0);
assert.equal(jyanken.getScores()[0].judgment, 0);
});
it("人間がチョキの場合は負け", () => {
jyanken.pon(1, 0);
assert.equal(jyanken.getScores()[0].judgment, 2);
});
it("人間がパーの場合は勝ち", () => {
jyanken.pon(2, 0);
assert.equal(jyanken.getScores()[0].judgment, 1);
});
});
describe("コンピューターがチョキの場合", () => {
it("人間がグーの場合は勝ち", () => {
jyanken.pon(0, 1);
assert.equal(jyanken.getScores()[0].judgment, 1);
});
it("人間がチョキの場合は引き分け", () => {
jyanken.pon(1, 1);
assert.equal(jyanken.getScores()[0].judgment, 0);
});
it("人間がパーの場合は負け", () => {
jyanken.pon(2, 1);
assert.equal(jyanken.getScores()[0].judgment, 2);
});
});
describe("コンピューターがパーの場合", () => {
it("人間がグーの場合は負け", () => {
jyanken.pon(0, 2);
assert.equal(jyanken.getScores()[0].judgment, 2);
});
it("人間がチョキの場合は勝ち", () => {
jyanken.pon(1, 2);
assert.equal(jyanken.getScores()[0].judgment, 1);
});
it("人間がパーの場合は引き分け", () => {
jyanken.pon(2, 2);
assert.equal(jyanken.getScores()[0].judgment, 0);
});
});
});
});
| 7cd5069999e8241498aa76dc40f5261aec81074e | [
"Markdown",
"Batchfile",
"JavaScript"
] | 7 | Markdown | evofan/test_react_easy_sample | 316c9c4826f712de1085b5d1a7fc2dfb3128db1c | 57ae43c77ec234f0ab6920d9b394018ef2177c6c |
refs/heads/master | <file_sep>Ansible Role System Defaults
=======================================
My default apps and configuration stuff I do not want to miss.
Supported Distributions
-----------------------
- Gentoo
Role Variables
--------------
None.
Dependencies
------------
[vundb/ansible-role-portage](https://github.com/vundb/ansible-role-portage)
Example Playbook
----------------
```
- hosts: all
roles:
- role: vundb-system-defaults
```
License
-------
MIT
Author Information
------------------
- You can find more roles in my GitHub channel [vundb](https://github.com/vundb)
- Follow me on Twitter [@vundb](https://twitter.com/vundb)
<file_sep>---
# fail when distribution is not supported
- name: check if distribution is supported
fail: msg="'{{ ansible_distribution }}' is not supported by this role"
when: ansible_distribution not in ["Gentoo"]
# install default packages
- import_tasks: ../../vundb-portage/tasks/gentoo/main.yml
vars:
portage_configuration:
- section: "package.use"
file: "git"
entries:
- "dev-vcs/git -perl -gpg -webdav"
portage_packages:
- { package: "app-admin/sudo" }
- { package: "app-editors/vim" }
- { package: "dev-vcs/git" }
- { package: "sys-process/htop" }
- { package: "app-text/tree" }
| e8f1c85d2e057fbcb64168f49bb4805b5152b73f | [
"Markdown",
"YAML"
] | 2 | Markdown | vundb/ansible-role-system-defaults | 46f7dd52aa2bb043b928bf5dc1091e8309c11261 | c5b29906c2a959717edbfbcef57c2a69a2243517 |
refs/heads/master | <file_sep>test1
=====
test
sssss
sdfsdfsdfsdffslll | e9333679e64f88adb2f8652b0e397855daf060fd | [
"Markdown"
] | 1 | Markdown | SongPing/test1 | dc96fc43638b22fe73dbd9dfb3916fce0e46aaa8 | 9a42f80adfe46aad3a7ef801c12b23d8c4dc610a |
refs/heads/master | <file_sep>import glob
import os
IN_DATASETS = ['antarctica_out_002_abf_noadmu_2000_ecor',
'redist_reload_antarctica_out_0A002_abf_admu_2000_ecor',
'antarctica_out_102_abf_noadmu_2040_ecor',
'redist_reload_antarctica_out_0A102_abf_admu_2040_ecor']
OUT_DATASETS = ['cbig_gpan_global2000_r16',
'cbig_gpan_national2000_r16',
'cbig_gpan_global2040_r16',
'cbig_gpan_national2040_r16']
DATASETS_EXT = ['rank.compressed.tif', 'curves.txt', 'features_info.txt',
'grp_curves.txt', 'png', 'run_info.txt']
rule all:
input:
expand("data/{dataset}/{dataset}.{ext}", dataset=OUT_DATASETS,
ext=DATASETS_EXT)
rule copy_data:
input:
expand("org/{dataset}/{dataset}.CAZ_MDE.{ext}", dataset=IN_DATASETS,
ext=DATASETS_EXT)
params:
data_dir="data/wood_productions_maps"
output:
rules.all.input
log:
"log/copy_data.log"
message:
"Copying files"
run:
for i, datafile in enumerate(input):
shell("cp {0} {1} >& {2}".format(datafile, output[i], log))
<file_sep>Name: cbig_gpan_r16
Version: 1.0.0
Date-created: 2016-09-28
Date-modified: 2016-09-28
GitHub: https://github.com/VUEG/data-cbig/tree/master/pouzols_et_al_2014_data
Metadata authors:
+ <NAME> <<EMAIL>>
+ <NAME> <<EMAIL>>
## DATA DESCRIPTION
This data collectio contains the following datasets:
1. cbig_gpan_global2000_r16
2. cbig_gpan_national2000_r16
3. cbig_gpan_global2040_r16
4. cbig_gpan_national2040_r16
Each of the datasets includes the following files:
### 1. Performance curves data
`data/*/*.curves.txt`, contains the performance curves data
of the analysis. This is the standard curves file output produced by Zonation,
for further details see Zonation manual [1]. The TSV file has the following
columns:
1. proportion of landscape lost
2. the cost of the remaining landscape. If land costs are not included in
the analysis, this column represents the number of cells remaining in the
landscape.
3. the lowest biodiversity feature distribution proportion remaining in the
landscape (i.e., the situation of the worst-off features).
4. the average proportion remaining over all features.
5. the weighted average proportion of all features.
6. the average extinction risk of biodiversity features as landscape is
iteratively removed (as calculated from the species-area relation using
parameter z).
7. the weighted extinction risk where species-area extinction risk has been
weighted by the feature weights.
8...N the proportion of distribution remaining for each feature
{1, 2, ..., N} in the same order as the features are listed in the
beginning of the file.
### 2. Feature info
`data/*/*.features_info.txt` is a text file containing a
list of the biodiversity features and the relative weights (Weight) used in the
analysis. This file also shows the initial sum of feature distributions
(distribution_sum)and the level of cell removal at which point targets for
particular feature have been violated. The initial sum of distribution is simply
the sum of each feature's local occurrence levels throughout the landscape. For
example, if the biodiversity feature data is in probabilities of occurrence,
this is the sum of probabilities in all cells before any landscape has been
removed.
### 3. Grouped performance curves data
`data/*/*.txt` contains representation curves
for minimum, mean, weighted mean, and maximum representation as well as weighted
extinction risk during the course of cell removal for each group. The second
column of this file specifies the solution cost (duplicated from the global
.curves.txt file). In the per group weighted extinction risk columns, the
species-area extinction risks are weighted using the weights of the features
belonging to each particular group, similar to the ext2 column of the global
.curves.txt file.
### 4. Image file
`data/*/*.png` is an image of the map of the area illustrating
the Zonation results, ranked by using different colors to indicate the
biological value of the site. Here the best areas are displayed in red, the
worst areas in black, and the "no data" areas in white.
### 5. Rank priority map data
`data/*/*.rank.compressed.tif` is the resulting priority
raster map in GeoTIFF format. The pixel values range between 0 and 1
representing the priority of the pixel in the priority ranking produced by
Zonation, higher values indicating higher priority. Values equal and above 0.83
represent the best 17 % of the terrestrial land for protection, including the
current protected areas. For uncertainty of these values, see the sections 3.4
and 3.7 in the Supplementary material of the article [2].
Technical details of the raster data:
Resolution: 0.017 degrees, equalling approximately 1.7 km at the Equator
Columns & rows: 21600, 10800
Number of bands: 1
Pixel type: floating point, 32 bit
Range: 0 to 1
Nodata value: -1
Coordinate Reference System: WGS84 (EPSG:4326)
### 6. Run info file
`data/*/*.run_info.txt` is a log file. The content of the
.run_info.txt file is identical to that of the .txt file.
## PROVENANCE INFORMATION
resource: pouzols_et_al_2014_data.tar.gz
retrieved_from: cbig-arnold
retrieved_date: 2016-09-28
retrieved_by: <NAME> <<EMAIL>>
modified_date: 2016-09-284
modified_by: <NAME> <<EMAIL>>
provenance_trail:
1. On cbig-arnold
```
mkdir Data/pouzols_et_al_2014_data
cp -r Data/DATAPART1/Zprojects/GNoTPA-expansion/GNoTPA_output/antarctica_out_002_abf_noadmu_2000_ecor/ Data/pouzols_et_al_2014_data/
cp -r Data/DATAPART1/Zprojects/GNoTPA-expansion/GNoTPA_output/antarctica_out_102_abf_noadmu_2040_ecor/ Data/pouzols_et_al_2014_data/
cp -r Data/DATAPART1/Zprojects/GNoTPA-expansion/GNoTPA_output_admu/redist_reload_antarctica_out_0A002_abf_admu_2000_ecor/ Data/pouzols_et_al_2014_data/
rm -r Data/pouzols_et_al_2014_data/redist_reload_antarctica_out_0A002_abf_admu_2000_ecor/redist_reload_antarctica_out_0A002_abf_admu_2000_ecor.CAZ_MDE.rank.per_ADMU_outputs/
cp -r Data/DATAPART1/Zprojects/GNoTPA-expansion/GNoTPA_output_admu/redist_reload_antarctica_out_0A102_abf_admu_2040_ecor/ Data/pouzols_et_al_2014_data/
rm -r Data/pouzols_et_al_2014_data/redist_reload_antarctica_out_0A102_abf_admu_2040_ecor/redist_reload_antarctica_out_0A102_abf_admu_2040_ecor.CAZ_MDE.rank.per_ADMU_outputs/
```
2. See [Snakefile](Snakefile) for additional processing steps.
## DESCRIPTION OF THE ANALYSES
The analysis identifies the priorities for expanding the current protected area
network to 17 % of the terrestrial land - and beyond. It compares the spatial
patterns and the performance of the prioritizations carried out globally and
nationally (using database of Global Administrative Areas [4]), limiting the
species ranges with present and future (2040) land use [5].
The analyses are based on the distributions of 24,757 terrestrial vertebrate
species and 826 ecoregions provided by IUCN Red List [6] and WWF Terrestrial
ecoregions [7]. The current protected area network was extracted from World
Database on Protected Areas [8] by selecting those polygons that belonged to
IUCN categories I to VI and had the status "designated". The analysis was
carried out using Zonation software [6] developed in the Conservation Biology
Informatics Group [9]. For full methodology and references, see the
supplementary material of the article [2].
## LICENCE
The data is licensed with Creative Commons Attribution 4.0 International (CC BY
4.0) lisence, see https://creativecommons.org/licenses/by/4.0/
## CITATION
The data set is an output of spatial conservation prioritization carried out for
global protected area expansion analyses published in Nature on 18th December
2014 with the title "Global protected area expansion is compromised by projected
land-use and parochialism":
http://dx.doi.org/10.1038/nature14032
You can cite to our data by citing the original paper:
- Pouzols, <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- <NAME>
- Verburg, <NAME>.
- <NAME>
(2014) Global protected area expansion is compromised by projected land-use and
parochialism. Nature, volume 516, issue 7531 pp.383-386.
## REFERENCES
[1] http://cbig.it.helsinki.fi/files/zonation/zonation_manual_v4_0.pdf
[2] http://www.nature.com/nature/journal/vnfv/ncurrent/full/nature14032.html#supplementary-information
[3] http://data.okfn.org/doc/data-package
[4] http://www.gadm.org/
[5] http://10.1111/j.1365-2486.2012.02759.x
[6] http://www.iucnredlist.org
[7] http://www.worldwildlife.org/publications/terrestrial-ecoregions-of-the-world
[8] http://www.protectedplanet.net
[9] http://cbig.it.helsinki.fi/software/zonation/
| bed73fde2284cbc32ab00b51a9198073474c285a | [
"Markdown",
"Python"
] | 2 | Markdown | Yuan-NewHub/data-cbig | 8c89cc370d622cbe5fcedd6fede143e5cdcd8b39 | e822d9fbe44fb76cf03b4b9681a88b74a583b9d6 |
refs/heads/master | <repo_name>slaveatanasov/playground<file_sep>/src/app/components/banners/banners.component.scss
.banners-wrapper {
display: flex;
flex-direction: column;
}
.banner-1,
.banner-2,
.banner-3 {
width: 100%;
height: 600px;
}
.banner-1 {
position: relative;
background-color: black;
color: white;
background-image: url("../../../assets//images/banner-we-are.jpg");
background-repeat: no-repeat;
background-position: center center;
}
.banner-2 {
position: relative;
background-color: rgb(255, 255, 255);
background-image: url("../../../assets//images/banner-we-do.jpg");
background-repeat: no-repeat;
background-position: center center;
}
.banner-3 {
position: relative;
background-color: black;
color: white;
background-image: url("../../../assets//images//banner-careers.jpg");
background-repeat: no-repeat;
background-position: center center;
}
.banner-l {
background-color: white;
height: 600px;
width: 40%;
position: absolute;
top: 0;
}
.banner-r {
background-color: black;
height: 600px;
width: 40%;
position: absolute;
right: 0;
top: 0;
}
.button {
border: 1px solid rgb(255, 255, 255);
text-align: center;
max-width: 200px;
padding: 0.9rem;
font-weight: 300;
font-size: 0.9rem;
transition: 0.2s ease-out;
}
.button:hover {
border: 1px solid rgba(0, 0, 0, 0);
background: rgb(90, 89, 89);
cursor: pointer;
box-shadow: 0px 0px 1px 9px rgba(255, 255, 255, 0.5);
}
.button-rev {
border: 1px solid black;
text-align: center;
max-width: 200px;
padding: 1rem;
font-weight: 300;
font-size: 0.9rem;
transition: 0.2s ease-out;
}
.button-rev:hover {
border: 1px solid rgba(0, 0, 0, 0);
background: rgb(226, 226, 226);
cursor: pointer;
box-shadow: 0px 0px 1px 9px rgba(0, 0, 0, 0.5);
}
.text {
display: flex;
flex-direction: column;
height: 100%;
padding-left: 6rem;
justify-content: center;
}
.text h1 {
font-size: 4rem;
line-height: 4rem;
}
.text p,
.text .button,
.text .button-rev {
margin-top: 2rem;
}
.text p {
font-weight: 300;
font-size: 0.9rem;
}
.text-rev {
display: flex;
flex-direction: column;
align-items: flex-end;
padding-left: 0;
padding-right: 6rem;
text-align: end;
}
.banner-r-mobile {
display: none;
}
.banner-l-mobile {
display: none;
}
@media (max-width: 1128px) {
.banner-r {
bottom: 0;
left: 0;
width: 100%;
display: none;
}
.banner-l {
bottom: 0;
left: 0;
width: 100%;
display: none;
}
.banner-r-mobile {
height: 100%;
display: flex;
flex-direction: row;
justify-content: center;
align-items: flex-end;
}
.banner-l-mobile {
height: 100%;
display: flex;
flex-direction: row;
justify-content: center;
align-items: flex-end;
}
.text-mobile {
color: wheat;
width: 100%;
display: flex;
justify-content: space-around;
background: black;
padding: 120px;
background: rgba(0, 0, 0, 0.35);
}
.text-mobile p {
color: #e0e0e0;
}
.text-mobile-rev {
width: 100%;
display: flex;
justify-content: space-around;
background: rgba(255, 255, 255, 0.719);
padding: 120px;
color: black;
}
}
@media (max-width: 900px) {
.text-mobile {
height: 100%;
display: flex;
align-items: center;
flex-direction: column;
justify-content: center;
}
.text-mobile h1 {
font-size: 3rem;
}
.text-mobile p {
margin: 1.5rem 0;
}
}
@media (max-width: 600px) {
.text-mobile {
padding: 1rem;
}
}<file_sep>/src/styles.scss
@font-face {
font-family: 'Gotham';
src: url('./assets/fonts/Gotham-Medium.eot');
src: url('./assets/fonts/Gotham-Medium.eot?#iefix') format('embedded-opentype'),
url('./assets/fonts/Gotham-Medium.woff2') format('woff2'),
url('./assets/fonts/Gotham-Medium.woff') format('woff'),
url('./assets/fonts/Gotham-Medium.ttf') format('truetype'),
url('./assets/fonts/Gotham-Medium.svg#Gotham-Medium') format('svg');
font-weight: 500;
font-style: normal;
}
@font-face {
font-family: 'Gotham';
src: url('./assets/fonts/Gotham-Bold.eot');
src: url('./assets/fonts/Gotham-Bold.eot?#iefix') format('embedded-opentype'),
url('./assets/fonts/Gotham-Bold.woff2') format('woff2'),
url('./assets/fonts/Gotham-Bold.woff') format('woff'),
url('./assets/fonts/Gotham-Bold.ttf') format('truetype'),
url('./assets/fonts/Gotham-Bold.svg#Gotham-Bold') format('svg');
font-weight: 700;
font-style: normal;
}
@font-face {
font-family: 'Gotham';
src: url('./assets/fonts/Gotham-Book.eot');
src: url('./assets/fonts/Gotham-Book.eot?#iefix') format('embedded-opentype'),
url('./assets/fonts/Gotham-Book.woff2') format('woff2'),
url('./assets/fonts/Gotham-Book.woff') format('woff'),
url('./assets/fonts/Gotham-Book.ttf') format('truetype'),
url('./assets/fonts/Gotham-Book.svg#Gotham-Book') format('svg');
font-weight: 300;
font-style: normal;
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
background: #f9f9f9;
font-family: Gotham;
position: relative;
max-width: 1440px;
margin: 0 auto;
}
.color-muted-white {
color: rgba(255, 255, 255, 0.700);
}
.line-height-standard {
line-height: 1.5rem;
}
.line-height-large {
line-height: 1.7rem;
}
.letter-spacing-standard {
letter-spacing: 0.5px;
}
.letter-spacing-large {
letter-spacing: 0.9px;
}
.letter-spacing-button {
letter-spacing: 1.8px;
}
i {
margin-left: 10px;
transition: 0.5s;
}<file_sep>/src/app/components/footer/footer.component.scss
.footer {
width: 100%;
height: 350px;
background-color: black;
color: white;
display: flex;
flex-direction: column;
align-items: center;
border-top: 2px solid rgba(255, 255, 255, 0.151);
}
.footer-element {
padding: 3.2rem 0 3.2rem 0;
}
.footer-element:first-child {
padding: 4.5rem 0 0 0;
}
.footer-element:last-child {
padding: 0 0 4rem 0;
}
.footer-nav {
width: 450px;
}
.menu {
display: flex;
justify-content: space-between;
font-size: 14px;
font-weight: 300;
}
.menu li {
list-style: none;
transition: 0.2s ease-in-out;
cursor: pointer;
}
.menu li:hover {
color: rgb(255, 255, 255);
}
li {
list-style: none;
}
.links {
display: flex;
justify-content: space-between;
}
.social {
width: 100px;
}
.social-button {
width: 20px;
fill: rgba(255, 255, 255, 0.26);
cursor: pointer;
}
.social-button:hover {
fill: rgba(255, 255, 255, 0.726);
}
@media (max-width: 500px) {
.footer-nav {
width: 80%;
}
}
<file_sep>/src/app/components/cards/cards.component.scss
.cards-wrapper {
background-color: #e2e2e0c0;
width: 100%;
height: 500px;
display: flex;
justify-content: center;
align-items: center;
}
.card {
display: flex;
flex-direction: column;
justify-content: space-between;
height: 350px;
width: 350px;
background-color: white;
margin-right: 2rem;
}
.card:last-child {
margin-right: 0;
}
.card-title-1,
.card-title-2,
.card-title-3 {
padding: 46px 0;
}
.card-title-1 {
background-image: url("../../../assets//images/card-play-harder.png");
background-repeat: no-repeat;
background-position: center center;
}
.card-title-2 {
background-image: url("../../../assets//images/card-simplicity.png");
background-repeat: no-repeat;
background-position: center center;
background-position-x: -10px;
}
.card-title-3 {
background-image: url("../../../assets//images/card-innovation.png");
background-repeat: no-repeat;
background-position: center center;
}
.card-content {
padding: 2rem;
text-align: center;
font-weight: 300;
font-size: 1.1rem;
}
.card-button {
padding: 14px 0 14px 0;
text-align: center;
background-color: black;
font-weight: 300;
font-size: 0.9rem;
cursor: pointer;
transition: 0.2s ease-in-out;
}
.card-button:hover {
color: white;
}
@media (max-width: 1200px) {
.cards-wrapper {
flex-direction: column;
padding: 0 12rem;
height: auto;
}
.card {
width: 100%;
margin: 0;
}
.card:nth-child(2) {
background: #ffffffb8;
}
.card-title-1,
.card-title-3 {
background-position: left;
}
.card-title-2 {
background-position: right;
}
.card-button {
align-self: flex-start;
padding: 14px;
}
.card-button-alt {
align-self: flex-end;
padding: 14px;
}
}
@media (max-width: 800px) {
.cards-wrapper {
padding: 0;
}
}
<file_sep>/README.md
Playground Application assignment/test by <NAME>. 09/04/2020
To run the app do the steps:
1. npm install
2. ng serve
<file_sep>/src/app/components/tabs/tabs.component.scss
.tabs-wrapper {
background-color: #e2e2e0;
width: 100%;
height: 500px;
display: flex;
justify-content: center;
align-items: center;
}
.tab-display {
display: flex;
justify-content: center;
align-items: center;
background: white;
height: 300px;
min-height: 280px;
width: 660px;
padding: 4rem;
font-size: 0.9rem;
font-weight: 300;
}
.tabs-nav {
height: 300px;
font-size: 0.9rem;
}
.tab-item {
background: rgb(196, 194, 194);
border-top: 1px solid rgba(255, 255, 255, 0.548);
height: 50px;
line-height: 50px;
width: 320px;
padding-left: 2rem;
font-weight: 300;
cursor: pointer;
transition: 0.1s;
}
.tab-item:first-child {
border-top: none;
}
.tab-item:hover {
background: rgb(214, 213, 213);
}
.tab-item.active:hover {
background: black;
}
.active {
background: black;
color: white;
}
.loader {
animation-name: spin;
animation-duration: 1000ms;
animation-iteration-count: infinite;
animation-timing-function: linear;
}
i {
margin-left: 10px;
transition: 0.5s;
}
.tab-item:not(.active):hover i {
margin-left: 26px;
}
.active i {
transform: rotate(90deg);
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
@media (max-width: 1128px) {
.tabs-wrapper {
height: auto;
}
.tab-display {
flex-grow: 1;
}
.tabs-nav {
height: auto;
}
}
@media (max-width: 900px) {
.tabs-wrapper {
flex-direction: column;
}
.tabs-nav {
display: flex;
flex-direction: column;
width: 100%;
}
.tab-item {
flex-grow: 1;
height: 50px;
line-height: 50px;
}
.tab-display {
height: 10%;
width: 100%;
}
}
@media (max-width: 600px) {
.tabs-nav {
width: 100%;
}
.tab-item {
width: 100%;
}
}
<file_sep>/src/app/app.module.ts
import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { SlideshowModule } from 'ng-simple-slideshow';
import { ScrollToModule } from '@nicky-lenaers/ngx-scroll-to';
import { AppRoutingModule } from './app-routing.module';
import { AppComponent } from './app.component';
import { NavigationComponent } from './components/navigation/navigation.component';
import { SliderComponent } from './components/slider/slider.component';
import { CardsComponent } from './components/cards/cards.component';
import { BannersComponent } from './components/banners/banners.component';
import { TabsComponent } from './components/tabs/tabs.component';
import { ContactFormComponent } from './components/contact-form/contact-form.component';
import { FooterComponent } from './components/footer/footer.component';
import { CookiesPolicyComponent } from './components/cookies-policy/cookies-policy.component';
@NgModule({
declarations: [
AppComponent,
NavigationComponent,
SliderComponent,
CardsComponent,
BannersComponent,
TabsComponent,
ContactFormComponent,
FooterComponent,
CookiesPolicyComponent
],
imports: [
BrowserModule,
AppRoutingModule,
FormsModule,
SlideshowModule,
ScrollToModule.forRoot()
],
providers: [],
bootstrap: [AppComponent]
})
export class AppModule { }
<file_sep>/src/app/components/cookies-policy/cookies-policy.component.scss
.cookies-policy-wrapper {
width: 100%;
max-width: 1440px;
position: fixed;
bottom: 0;
left: 50%;
transform: translateX(-50%);
height: 50px;
display: flex;
justify-content: center;
align-items: center;
background-color: white;
color: black;
transition: 1s;
}
.cookies-policy-wrapper .display-none {
transition: 1s;
}
.button {
background: black;
color: white;
padding: 0 2.5rem;
line-height: 30px;
font-size: 10px;
font-weight: 300;
transition: 0.1s;
}
.button:hover {
background: rgba(0, 0, 0, 0.836);
cursor: pointer;
}
p {
font-weight: 300;
margin-right: 2rem;
font-size: 10px;
letter-spacing: 0.3px;
}
.display-none {
opacity: 0;
}
.display-none-2 {
display: none;
}
@media (max-width: 850px) {
.cookies-policy-wrapper {
height: auto;
padding: 1rem 2.5rem;
}
}
<file_sep>/src/app/components/navigation/navigation.component.ts
import { Component, OnInit, HostListener } from '@angular/core';
@Component({
selector: 'app-navigation',
templateUrl: './navigation.component.html',
styleUrls: ['./navigation.component.scss']
})
export class NavigationComponent implements OnInit {
navScrolled: boolean;
constructor() { }
ngOnInit() {
this.navScrolled = false;
}
@HostListener('window:scroll', ['$event'])
onWindowScroll(e) {
if (window.pageYOffset > 150) {
this.navScrolled = true;
} else {
this.navScrolled = false;
}
}
}
<file_sep>/src/app/components/tabs/tabs.component.ts
import { Component, OnInit } from '@angular/core';
interface dataItem {
id: number;
title: string;
content: string[]
}
@Component({
selector: 'app-tabs',
templateUrl: './tabs.component.html',
styleUrls: ['./tabs.component.scss']
})
export class TabsComponent implements OnInit {
data: dataItem[] = [
{
"id": 1,
"title": "Vestibulum at odio sit amet",
"content": [
"Vestibulum at odio sit amet diam consectetur congue.",
"Donec imperdiet tincidunt nisi non dignissim.",
"Maecenas diam metus, fermentum a velit ut, auctor consequat ligula.",
"In ultrices lobortis venenatis.",
"Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Quisque dignissim sit amet lectus ac tincidunt.",
"Quisque bibendum mi at tempus tempus.",
"Suspendisse pretium, quam eu faucibus cursus, nunc leo pharetra justo, ut rutrum lorem ipsum quis velit.",
"Aenean imperdiet molestie dignissim.",
"Curabitur faucibus nulla metus, vel ornare libero accumsan eget."
]
},
{
"id": 2,
"title": "Sed vehicula neque",
"content": [
"Donec ultricies felis non sem euismod, vel hendrerit metus porttitor.",
"Donec sed ex ut tellus ultricies vestibulum vitae sit amet erat.",
"Etiam tempor, libero eget bibendum auctor, risus nulla finibus dolor, in ullamcorper ipsum sapien dignissim felis.",
"Nunc mattis sagittis mi, at hendrerit dolor semper non.",
"Sed scelerisque sollicitudin felis euismod laoreet.",
"Nunc elementum purus orci, nec ornare mi vehicula at.",
"Praesent porta nisi in magna aliquam, sed consequat turpis ornare.",
"Sed laoreet porttitor purus vitae tincidunt."
]
},
{
"id": 3,
"title": "Sed vehicula neque",
"content": [
"Nunc mattis sagittis mi, at hendrerit dolor semper non.",
"Sed scelerisque sollicitudin felis euismod laoreet.",
"Nunc elementum purus orci, nec ornare mi vehicula at.",
"Praesent porta nisi in magna aliquam, sed consequat turpis ornare.",
"Sed laoreet porttitor purus vitae tincidunt.",
"Donec ultricies felis non sem euismod, vel hendrerit metus porttitor.",
"Donec sed ex ut tellus ultricies vestibulum vitae sit amet erat.",
"Etiam tempor, libero eget bibendum auctor, risus nulla finibus dolor, in ullamcorper ipsum sapien dignissim felis."
]
}
]
selectedItem: dataItem;
loading: boolean;
selectedTab: number;
constructor() { }
ngOnInit() {
this.loading = true;
this.data.map(item => {
if (item.id === 1) {
this.selectedTab = item.id;
setTimeout(() => {
this.selectedItem = item;
this.loading = false;
}, 800)
}
})
}
displaySelected(id: number) {
this.loading = true;
this.data.map(item => {
if (item.id === id) {
this.selectedTab = id;
setTimeout(() => {
this.selectedItem = item;
this.loading = false;
}, 1200)
}
})
}
}
<file_sep>/src/app/components/slider/slider.component.scss
.slider-wrapper {
position: relative;
width: 100%;
height: 800px;
}
.content {
color: white;
position: absolute;
left: 18%;
top: 35%;
width: 260px;
}
h1 {
font-size: 4.25rem;
line-height: 4rem;
}
p {
font-weight: 300;
font-size: 0.9rem;
margin-top: 2rem;
}
| 05626421013e7de042b7cf02143ad60eb8a9c1c9 | [
"TypeScript",
"Markdown",
"SCSS"
] | 11 | TypeScript | slaveatanasov/playground | cddf5959f79ea07fdba76707be78076288765d5f | 3ccd71d02a6d38679970a1f515f43b63d36ee5e0 |
refs/heads/master | <file_sep>package kpmovil.app;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import android.os.Bundle;
import android.app.Activity;
import android.content.Intent;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
public class IniciarSesion extends Activity {
Button ver_archivos;
private EditText mat;
private EditText con;
String str ="";
private TextView mensaje;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_iniciar_sesion);
ver_archivos=(Button) findViewById(R.id.btnIniciar);
mat=(EditText) findViewById(R.id.txtMat);
con=(EditText) findViewById(R.id.txtCon);
mensaje=(TextView) findViewById(R.id.mensaje);
ver_archivos.setOnClickListener(new OnClickListener() {
private StringBuilder inputStreamToString(InputStream is) {
String line = "";
StringBuilder total = new StringBuilder();
// Wrap a BufferedReader around the InputStream
BufferedReader rd = new BufferedReader(new InputStreamReader(is));
// Read response until the end
try {
while ((line = rd.readLine()) != null) {
total.append(line);
}
} catch (IOException e) {
e.printStackTrace();
}
// Return full string
return total;
}
public void onClick(View v) {
try {
HttpClient client = new DefaultHttpClient();
HttpGet request = new HttpGet();
String url="http://kp.utch.edu.mx/loginmov.php?mat="+mat.getText().toString()+"&con="+con.getText().toString()+"";
Log.e("Tania", "Lo que trae lo que voy a solicitar"+url);
request.setURI(new URI(url));
HttpResponse response = client.execute(request);
str =inputStreamToString(response.getEntity().getContent()).toString();
Log.e("Tania", "Imprimiendo lo que tiene response"+str);
} catch (Exception e) {
Log.e("Tania", "Error al conectarse"+e.getMessage());
}
if(str.equals("ok")){
Intent i = new Intent(IniciarSesion.this,Menu.class);
i.putExtra("mat", mat.getText().toString());
i.putExtra("con", con.getText().toString());
mensaje.setText("");
startActivity(i);
finish();
}
else{
mensaje.setText("Información incorrecta");
}
}
});
}
}
<file_sep>package kpmovil.app;
public class Ucambio {
public static String cambio="";
}
<file_sep>package kpmovil.app;
import android.app.Activity;
import android.app.AlertDialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.ListView;
import android.widget.TextView;
public class SubirArchivos extends Activity {
Button btnSel;
public static Button btnSub;
public static TextView txtNomArch;
ListView lstdir;
String mat, con;
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.subirarchivos);
mat= getIntent().getExtras().getString("mat");
con= getIntent().getExtras().getString("con");
txtNomArch= (TextView) findViewById(R.id.archivo);
txtNomArch.setText("Seleccionar Archivo ");
btnSel= (Button) findViewById(R.id.btnsel);
btnSub= (Button) findViewById(R.id.btnsubir);
btnSub.setClickable(false);
btnSel.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
try{
try {
Intent i= new Intent(SubirArchivos.this, OtroFileBrowserActivity.class);
i.putExtra("mat", mat);
i.putExtra("con", con);
startActivity(i);
} catch (Throwable e) {
e.printStackTrace();
}
}
catch(Exception e){
}
}
});
btnSub.setOnClickListener(new OnClickListener() {
public void onClick(View arg0) {
subir sub= new subir();
sub.uploadFile(txtNomArch.getText().toString(), mat, con);
alerts();
Ucambio.cambio="Se subió archivo: "+txtNomArch.getText().toString();
}
});
}
public void alerts(){
new AlertDialog.Builder(this)
.setTitle("Archivo Subido")
.setPositiveButton("OK",
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
}
}).show();
}
}<file_sep>package kpmovil.app;
import android.os.Bundle;
import android.app.Activity;
import android.content.Intent;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
public class Menu extends Activity {
Button btnVerA;
Button btnSubirA;
Button btnInfoU;
String mat="";
String con="";
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.menu);
mat=this.getIntent().getExtras().getString("mat");
con=this.getIntent().getExtras().getString("con");
btnVerA=(Button) findViewById(R.id.btnVerA);
btnSubirA=(Button) findViewById(R.id.btnSubirA);
btnInfoU=(Button) findViewById(R.id.btnInfoU);
btnVerA.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
Intent i = new Intent(Menu.this,ver_archivos.class);
i.putExtra("mat", mat);
i.putExtra("con", con);
startActivity(i);
}
});
btnSubirA.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
Intent i = new Intent(Menu.this,SubirArchivos.class);
i.putExtra("mat", mat);
i.putExtra("con", con);
startActivity(i);
}
});
btnInfoU.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
Intent i = new Intent(Menu.this,info.class);
i.putExtra("mat", mat);
i.putExtra("con", con);
startActivity(i);
}
});
}
}
| b368133e4b59881a78aecd93e18b15e07b25b1ce | [
"Java"
] | 4 | Java | irvinghisa/AKioPrint-Movil | 23f7d7ff0f4887a24263b43d6bf3c85e4494a651 | dc26768b5a8b75f215e669544bb28d845a9153fc |
refs/heads/master | <file_sep>import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
import jieba
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import math
INPUT_DIM = 784
LAYER1_NODES = 392
LAYER2_NODES = 196
OUTPUT_NODES = 10
MOVING_AVERAGE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
BATCH_SIZE = 200
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
TRAINING_NUM = 5
def ac_net_inference(input_tensor, avg_class):
with tf.variable_scope('layer1'):
layer1_weights = tf.get_variable('weight', [INPUT_DIM, LAYER1_NODES],
initializer=tf.truncated_normal_initializer(stddev=0.1))
layer1_bias = tf.get_variable('bias', [LAYER1_NODES], initializer=tf.constant_initializer(0.0))
with tf.variable_scope('layer2'):
layer2_weights = tf.get_variable('weight', [LAYER1_NODES, LAYER2_NODES],
initializer=tf.truncated_normal_initializer(stddev=0.1))
layer2_bias = tf.get_variable('bias', [LAYER2_NODES], initializer=tf.constant_initializer(0.0))
with tf.variable_scope('layer3'):
layer3_weights = tf.get_variable('weight', [LAYER2_NODES, OUTPUT_NODES],
initializer=tf.truncated_normal_initializer(stddev=0.1))
layer3_bias = tf.get_variable('bias', [OUTPUT_NODES], initializer=tf.constant_initializer(0.0))
if avg_class is None:
layer1_out = tf.nn.relu(tf.matmul(input_tensor, layer1_weights) + layer1_bias)
layer2_out = tf.nn.relu(tf.matmul(layer1_out, layer2_weights) + layer2_bias)
return tf.matmul(layer2_out, layer3_weights) + layer3_bias
else:
layer1_out = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(layer1_weights))
+ avg_class.average(layer1_bias))
layer2_out = tf.nn.relu(tf.matmul(layer1_out, avg_class.average(layer2_weights))
+ avg_class.average(layer2_bias))
return tf.matmul(layer2_out, avg_class.average(layer3_weights)) + avg_class.average(layer3_bias)
def ac_net_train(train_feed, valid_feed):
data = tf.placeholder(tf.float32, [None, INPUT_DIM], name='x-input')
label = tf.placeholder(tf.float32, [None, OUTPUT_NODES], name='y-input')
global_step = tf.Variable(0, trainable=False)
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variable_op = variable_averages.apply(tf.trainable_variables())
output = ac_net_inference(data, variable_averages)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=output, labels=tf.argmax(label, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
with tf.variable_scope('layer1', reuse=True):
weights1 = tf.get_variable('weight')
with tf.variable_scope('layer2', reuse=True):
weights2 = tf.get_variable('weight')
with tf.variable_scope('layer3', reuse=True):
weights3 = tf.get_variable('weight')
regularization = regularizer(weights1) + regularizer(weights2) + regularizer(weights3)
loss = cross_entropy_mean + regularization
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
33600 / BATCH_SIZE,
LEARNING_RATE_DECAY)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,
global_step=global_step)
train_op = tf.group(train_step, variable_op)
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
global_init = tf.global_variables_initializer()
sess.run(global_init)
for i in range(TRAINING_NUM):
for j in range(2):
pass
gcount = 3
def global_test():
global gcount
gcount += 1
print(gcount)
if __name__ == '__main__':
# data = pd.read_csv('train.csv')
# target = data['label']
# data.drop(['label'], inplace=True)
# traindata, validdata, trainlabel, validlabel = train_test_split(data, target, test_size=0.8)
# f = open('D:\ct.txt','r',encoding='utf-8')
# a = f.read()
# b = jieba.cut(a)
# wordcount = CountVectorizer()
# tfidf = TfidfTransformer()
# wordres = wordcount.fit_transform(b)
# print(wordcount.get_feature_names())
# f.close()
global_test()
| 5082e8206360c987d6db063ebaabf0c8af3643b6 | [
"Python"
] | 1 | Python | freedenS/Kaggle_Project | b4638113563bd46018a3b87ff6edbc5a118a5e56 | 59268be84955b504063c6126c7afdb227f1d954b |
refs/heads/master | <repo_name>saloni024/react_tutorial<file_sep>/src/index.js
import React from "react"
import ReactDOM from "react-dom"
//import MyList from "./components/MyList"
//import MyInfo from "./components/MyInfo" //look for the library from the current directory [it's okay to not put .js]
import App from "./components/App"
/*
//ReactDOM and JSX
ReactDOM.render(<div><h1>Hello world</h1><p>This is paragraph!</p></div>,document.getElementById("root"))
//Functional components
var p1 = document.createElement("p")
p1.setAttribute("id","p1")
document.getElementById("root").appendChild(p1)
ReactDOM.render(<MyList/>, document.getElementById("p1"))
//Functional components practice
var p2 = document.createElement("p")
p2.setAttribute("id","p2")
document.getElementById("root").appendChild(p2)
ReactDOM.render(<MyInfo/>, document.getElementById("p2"))*/
//parent/child nested component
var p3 = document.createElement("p")
p3.setAttribute("id","p3")
document.getElementById("root").appendChild(p3)
ReactDOM.render(<App />, document.getElementById("p3"))<file_sep>/src/components/Footer.js
import React from "react"
import Form from "./Form"
function Footer(){
return (
<footer className="footer">
<Form />
</footer>
)
}
export default Footer<file_sep>/src/components/MyList.js
import React from "react"
function MyList(){ //prefer capital camel casing in naming convention
return (
<div>Series list:
<ul>
<li>The office</li>
<li>Brooklyn 99</li>
<li>Suits</li>
</ul>
</div>
)
}
export default MyList<file_sep>/src/components/ContactCard.js
import React from "react"
/*function ConatctCard(props){
//props.contact.imgUrl if passing the object
return (
<div className="contact-card">
<img src={props.imgUrl} alt={props.imgAlt} height="80px"></img>
<h4>{props.name}</h4>
<p>Phone: {props.phone}</p>
<p>Email: {props.email}</p>
</div>
)
}*/
class ContactCard extends React.Component{
constructor(){//to initialize values
super()//gets components from React.Component, always add it in constructor
this.state = {isLoggedIn: true}//state object, add always
this.ChangeState = this.ChangeState.bind(this) //binds the method with the class, compulsory
}
ChangeState(){
this.setState(prevState => {
return {
isLoggedIn: prevState.isLoggedIn ? false : true
}
})
}
render(){
let status = ""
let update = ""
if(this.state.isLoggedIn){
status = "in"
update = "out"
}
else{
status = "out"
update = "in"
}
return (
<div className="contact-card">
<img src={this.props.contact.imgUrl} alt={this.props.contact.imgAlt} onClick={() => DisplayName(this.props.contact)}height="80px"></img>
<h4>{this.props.contact.name}</h4>
<p>Phone: {this.props.contact.phone}</p>
<p>Email: {this.props.contact.email}</p>
<p>Status: logged {status}</p>
<button onClick={this.ChangeState}>Log {update}</button>
</div>
)
}
}
function DisplayName(props){
return(
console.log(props.name)
)
}
export default ContactCard<file_sep>/src/components/MainContent.js
import React from "react"
import ContactCard from "./ContactCard"
import contactData from "./ContactData"
function MainContent(){
const date = new Date()
const hours = date.getHours()
let timeOfDay
//initiating object
const styles = {
textDecoration: "underline"
}
if(hours < 12){
timeOfDay = "morning"
styles.color = "navy" //setting object's property
}else if(hours >=12 && hours < 17){
timeOfDay = "afternoon"
styles.color = "yellow"
}else{
timeOfDay = "evening"
styles.color = "black"
}
//put 2 curly braces while doing inline css cause 1st for Javascript object 2nd for javascript component inside JSX code...no dash use Camel case in properties
const contactComponent = contactData.map(data => <ContactCard key={data.id} contact={data}/>)
return(
<main className="main">
<p style={/*{color: "navy", textDecoration: "underline"}*/styles}>Good {`${timeOfDay} :) `}<br></br>It is currently about {hours % 12} o'clock!</p>
<div className="contact-list">
<h3>Contact List</h3>
{contactComponent}
</div>
</main>
)
}
export default MainContent<file_sep>/src/components/App.js
import React from "react"
import Navbar from "./Navbar"
import MainContent from "./MainContent"
import Footer from "./Footer"
/*function App(){
return (
<div>
<Navbar />
<MainContent />
<Footer />
</div>
)
}*/
//if the class method has parameters use this.props to access
class App extends React.Component{
constructor(){
super()
this.state = {
isLoading: true,
swapiData: {}
}
}
componentDidMount(){
setTimeout(()=>{
this.setState({
isLoading: false
}
)}, 2500)
fetch("https://swapi.dev/api/people/20/")//gets the data from url
.then(response => response.json())
.then(data => {
this.setState({
swapiData: data
})
})
}
render(){
if(this.state.isLoading){
return(
<div>
<h1>Pass on what you have learned.</h1>
<h1>-{this.state.swapiData.name}</h1>
</div>
)
}
return (
<div>
<Navbar />
<MainContent />
<Footer />
</div>
)
}
/*
static getDerivedStateFromProps(props, state){
returns the new updated state based on props
}
getSnapshotBeforeUpdate(){
//createa backup of the current way things are
}
componentDidMount(){
runs only one time in beginning
get the data we need to correctly decide
}
componentWillReceiveProps(nextProps){
checks for the upcoming/receiving components
if(nextProps.whatever !== this.props.whatever){
perform something
--check if the receiving prop is different then current prop and performs according to that
}
}
shouldComponentUpdate(nextProps, nextState){
//wheather the components need to change or not
//return true or false
}
componentWillUnmount(){
//cleans up before the code disappears
//eg. removes event listeners
}
*/
}
export default App<file_sep>/src/components/Navbar.js
import React from "react"
function Navbar(){
const firstName = "Dwight"
const lastname = "Schrute"
return (
/*<nav>
<h3>Parent/child nested components</h3>
<ul>
<li>1</li>
<li>2</li>
<li>3</li>
</ul>
</nav>*/
//curly braces indecates javascript
<div>
<header className="navbar">Hello {firstName + " " + lastname}!</header>
</div>
)
}
export default Navbar | cc4ba60a5d298ba64bc7fcd355c23d28bcc9d5d3 | [
"JavaScript"
] | 7 | JavaScript | saloni024/react_tutorial | c5f0d784c1cbccb7a1a2617d669b43861c479663 | 5d023f0623d92c204fa9532325d2db129ea008d8 |
refs/heads/master | <file_sep><template>
<div style="background-color:#007799">
<v-btn small icon style="cursor:pointer" @click="backToPage">
<!-- v-if="this.$route.path === '/auth/registration'"
@click="$router.go(-1)"-->
<v-icon style="color: white;">arrow_back</v-icon>
</v-btn>
<v-layout row wrap class="backgroundLogin pt-3">
<v-flex xs12 md12>
<!-- <slider /> -->
</v-flex>
<v-flex xs12 md12>
<div style="overflow:hidden; background-color:#007799 ">
<!-- <v-img :src="require('@/assets/authBackground.png')" position="top"> -->
<div class="resizeIfDesktop">
<login v-if="loginModalShow" @loginDialog="loginDialog" @showRegInfo="showRegInfo"></login>
<regInfo v-if="regInfo" @registrationDialog="registrationDialog" @showLogin="showLogin"></regInfo>
</div>
<!-- </v-img> -->
<!-- <register class="absoluteForm"></register> -->
</div>
</v-flex>
</v-layout>
</div>
</template>
<script>
import login from "@/views/modalView/login";
import regInfo from "@/views/modalView/regInfo";
import register from "@/views/modalView/registration";
import slider from "@/components/local/auth/auth.slider";
export default {
data: () => ({
regInfo: false,
loginModalShow: true
}),
components: {
slider,
login,
register,
regInfo
// loginFormCarrier
},
  methods: {
    // bubble the login modal's close event up to the page that opened this modal
    loginDialog(val) {
      this.$emit("closeModalFromLogin", val);
    },
    // bubble the registration modal's close event up to the parent
    registrationDialog(val) {
      this.$emit("closeModalFromRegistration", val);
    },
    // back arrow: ask the parent to hide the whole auth modal
    backToPage() {
      this.$emit("backToPage", false);
    },
    // switch from the login form to the registration info form
    showRegInfo(val) {
      this.regInfo = val;
      this.loginModalShow = !val;
    },
    // switch back from the registration info form to the login form
    showLogin(val) {
      this.loginModalShow = val;
      this.regInfo = !val;
    }
  }
};
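// A minimal sketch of how a parent page might mount this modal (tag and flag
// names here are illustrative, not taken from the rest of the codebase); it
// listens for the close events re-emitted by the methods above:
//
//   <v-dialog v-model="showAuth" fullscreen>
//     <auth-modal
//       @closeModalFromLogin="showAuth = false"
//       @closeModalFromRegistration="showAuth = false"
//       @backToPage="showAuth = $event"
//     />
//   </v-dialog>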
</script>
<style>
@media screen and (min-width: 640px) {
.resizeIfDesktop {
padding: 200px;
}
}
.backgroundLogin {
/* background-color: white; */
background-color: #007799;
}
.curvyDiv {
min-height: 100%;
background-color: #007799;
overflow: hidden;
}
.absoluteForm {
/* position: relative;
margin-left: -75px;
top: 67%;
left: 20%;
bottom: 0%; */
}
</style>
<file_sep>/* eslint-disable no-console */
import ApiService from "./api.service";
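// NOTE: ApiService.get resolves to an axios-style response, and the backend
// appears to wrap payloads as { data: { data: ... } }; hence the
// `response.data.data` unwrapping in every method below.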
const mentorService = {
  // random mentor data for the home page
async fetchRandomMentorData() {
try {
const response = await ApiService.get(
"users?operation=RandomMentorData&limit=10&page=1"
);
      return response.data.data;
    } catch (error) {
      // resolve to an explicit null on failure instead of undefined
      return null;
    }
},
// fetch a mentor's basic data by his id (api no 3)
async fetchMentorBasicDataById(mentor_id) {
try {
const response = await ApiService.get(
"users?mentor_id=" + mentor_id + "&operation=BasicMentorData"
);
      return response.data.data;
    } catch (error) {
      return null;
    }
},
// fetch a mentor's details data by his id (api no 4)
async fetchMentorDetailsDataById(mentor_id) {
try {
const response = await ApiService.get(
        // e.g. users?mentor_id=<mentor_id>&operation=DetailMentorData
"users?mentor_id=" + mentor_id + "&operation=DetailMentorData"
);
      return response.data.data;
    } catch (error) {
      return null;
    }
},
// summary count of sessions of a mentor (api no 8)
async fetchMentorSessionCountById(mentor_id) {
try {
const response = await ApiService.get(
"user-sessions?mentorId=" + mentor_id + "&operation=Summary"
);
      return response.data.data;
    } catch (error) {
      return null;
    }
},
// https://test.api.quizards.pro/user-sessions?mentorId=5ccaa4c1df0c2c10615987b3&operation=MentorReviewsFromUsers
  // get all reviews left by mentees against a mentor (api no 12)
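  // `mode` is interpolated into the query key ("mentor" -> "mentorId",
  // "user" -> "userId"), and `reviews_from` is the operation name, e.g.
  // "MentorReviewsFromUsers" or "UserReviewsFromMentors".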
async fetchAllReviews(_id, mode, reviews_from) {
try {
const response = await ApiService.get(
"user-sessions?" +
mode +
"Id=" +
_id +
"&operation=" +
reviews_from +
""
);
// UserReviewsFromMentors | MentorReviewsFromUsers
      return response.data.data;
    } catch (error) {
      return null;
    }
},
  // currently mirrors fetchAllReviews and returns the same review payload,
  // from which the caller can derive an average rating
  async fetchRatingAverage(_id, mode, reviews_from) {
try {
const response = await ApiService.get(
"user-sessions?" +
mode +
"Id=" +
_id +
"&operation=" +
reviews_from +
""
);
// UserReviewsFromMentors | MentorReviewsFromUsers
      return response.data.data;
    } catch (error) {
      return null;
    }
}
};
export default mentorService;
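// A minimal usage sketch, assuming a Vue component's created hook; the import
// path mirrors the sibling storage.service import and `someMentorId` is a
// hypothetical id string, not a value from this codebase:
//
//   import mentorService from "@/service/mentor.service";
//   export default {
//     async created() {
//       this.mentors = await mentorService.fetchRandomMentorData();
//       this.reviews = await mentorService.fetchAllReviews(
//         someMentorId,
//         "mentor",
//         "MentorReviewsFromUsers"
//       );
//     }
//   };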
<file_sep><template>
<div class="my-3">
<v-card class="elevation-0" v-for="(item, i) in items" :key="i">
<v-card-title>
<h2>{{ item.title }}</h2>
</v-card-title>
<div v-for="(example, i) in item.examples" :key="i">
<v-card-text>{{ example }}</v-card-text>
</div>
</v-card>
</div>
</template>
<script>
export default {
data: () => ({
//informatics items
items: [
{
icon: "assignment",
iconClass: "blue white--text",
title: "Popular Careers",
examples: ["coming soon"]
},
{
icon: "call_to_action",
iconClass: "amber white--text",
title: "Average Salary/month",
examples: ["coming soon"]
}
],
    // sample entries kept for reference until real career/salary data is wired in
    itemsDemo: [
{
icon: "assignment",
iconClass: "blue white--text",
title: "Popular Careers",
examples: ["UI/UX Development", "Data Science", "Block Chain etc"]
},
{
icon: "call_to_action",
iconClass: "amber white--text",
title: "Average Salary/month",
examples: [
"UI/UX Development(1500$)",
"Data Science(2000$)",
"Block Chain(2323$)"
]
}
]
})
};
</script>
<style>
.v-card__text {
padding: 3px 16px;
font-size: 18px;
}
@media screen and (max-width: 375px), screen and (max-height: 667px) {
.v-list__tile--avatar {
height: 38px;
}
}
</style>
<file_sep><template>
<div>
<v-btn icon @click="backToProfile">
<v-icon>arrow_back</v-icon>
</v-btn>
<v-container>
<basic :userDetails="userDetails" />
<skills :skills="skills" :userDetails="userDetails" />
<education :userDetails="userDetails" />
<experience :userDetails="userDetails" />
</v-container>
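    <!-- feedback snackbar: colour, position and text are driven by the data fields in the script below -->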
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
      :bottom="y === 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import education from "@/components/local/profileUpdateModal/profileUpdateModal.education";
import experience from "@/components/local/profileUpdateModal/profileUpdateModal.experience";
import { UserInfoService } from "@/service/storage.service";
import skills from "@/components/local/profileUpdateModal/profileUpdateModal.skill";
import basic from "@/components/local/profileUpdateModal/profileUpdateModal.basic";
import { mapActions } from "vuex";
export default {
components: { basic, skills, education, experience },
props: ["userDetails"],
data: () => ({
topSkillValue: [],
skillValue: [],
name: "",
company: "",
designation: "",
email: "",
mobile: "",
skills: [],
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: "",
userId: UserInfoService.getUserID(),
education: ""
}),
mounted() {
// console.log(this.userDetails);
},
async created() {
this.skills = await this.fetchAllSkills();
// console.log("i am skills", this.skills);
},
methods: {
...mapActions("commonUserStore", ["saveUpdateProfile", "fetchAllSkills"]),
async updateProfile() {
// console.log("asdf");
let userData = {
email: this.userDetails.email,
phone: this.userDetails.phone,
age: 27,
sex: "M",
imageUrl: "",
address: this.userDetails.address,
company: this.userDetails.company,
designation: this.userDetails.designation,
bio: this.userDetails.bio,
university: "Daffodil university",
skills: this.userDetails.skills,
industry: ["5ccc717bba317413de5149da"],
name: this.userDetails.name,
recommendations: [
{
name: "<NAME>",
imageUrl: "",
designation: "Lead Engineer",
company: "Funush Private limited",
_id: "5cd90265faf5bd5cf7cf50dc",
comment: "he is quite good at programming and a good learner also"
}
],
references: [
{
name: "<NAME>",
designation: "Lead Engineer",
company: "Funush Private limited",
mobile: "01687123123",
email: "<EMAIL>",
_id: "5cd90265faf5bd5cf7cf50db"
}
],
experience: [
{
designation: "Front End Engineer",
company: "Funush Private limited",
_id: "5cd90265faf5bd5cf7cf50da"
},
{
designation: "Full Stack Engineer",
company: "Future Tech IT",
_id: "5cd90265faf5bd5cf7cf50d9"
}
]
};
// console.log(userData);
// console.log("my userId", this.userId);
try {
const response = await this.saveUpdateProfile({
user_data: userData,
user_id: this.userId
});
// console.log("check my response ", response);
if (!response) {
this.showAlert("Profile Update Failed!", "error", "top");
} else {
this.showAlert("Profile Update Successful!", "success", "top");
var getDialogValue = false;
this.$emit("setDialogValue", getDialogValue);
}
return response;
} catch (error) {
console.log(error);
return null;
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
},
backToProfile() {
var getDialogValue = false;
this.$emit("setDialogValue", getDialogValue);
},
educationData(e) {
this.education = e;
// console.log(this.education);
}
}
};
</script>
<file_sep><template>
<div style="background-color:white;">
<v-card flat style="background-color:white;" class="pt-2 px-2">
<v-layout row wrap>
<v-flex>
<span style="color:#007790;">
<Strong>Reviews</Strong>
</span>
</v-flex>
<v-flex style="display: flex;
justify-content: flex-end;" v-if="showSeeMore">
<span style="color:#007790;" @click="dialog=true">
<Strong >See More</Strong>
</span>
</v-flex>
</v-layout>
<br>
<div v-if="noReview" style="text-align:center;padding-bottom:10px">
<p>No Review</p>
</div>
<v-layout row wrap v-if="!noReview">
<v-flex xs2>
<v-btn small icon flat>
<v-avatar>
<v-img :src="require('@/assets/my.jpg')"></v-img>
</v-avatar>
</v-btn>
</v-flex>
<v-flex xs4>
<span class="caption">{{reviewArray[0].userId.name}}</span>
<br>
<span class="grey--text text--lighten-2 caption">({{reviewArray[0].averageRatingFromUser}})</span>
<v-rating v-model="reviewArray[0].averageRatingFromUser" dense color="grey" readonly size="4" half-increments></v-rating>
<span class="caption">1 days ago</span><br>
</v-flex>
<v-flex xs16 class="px-2">
<p>{{reviewArray[0].sessionReviewFromUser}}</p>
</v-flex>
</v-layout>
</v-card>
<v-dialog
v-if="dialog"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
>
<reviewRatings :allRatingAverage="allRatingAverage" :reviewArray="reviewArray" @sendDialogValue="getDialogValue"></reviewRatings>
</v-dialog>
</div>
</template>
<script>
import { mapActions, mapGetters } from "vuex";
import reviewRatings from "@/views/modalView/rating&Reviews.vue";
export default {
props: ["mentorBasic"],
components: {
reviewRatings
},
data: () => ({
reviewArray:[],
noReview:false,
showSeeMore:false,
rating: 4.5,
dialog: false,
allRatingAverage:[],
}),
async created() {
var reviewsFrom = "";
const response = await this.fetchAllReviews({
reviews_from: "MentorReviewsFromUsers",
mode:"mentor",
_id: this.mentorBasic._id
});
this.allRatingAverage = await this.fetchRatingAverage({
reviews_from: "AvgMentorReviewFromUsers",
mode:"mentor",
_id: this.mentorBasic._id
});
    // console.log(response);
if(response.length >=1){
this.showSeeMore = true;
this.reviewArray =response;
}
if(response.length ===0){
this.noReview = true;
}
},
methods: {
...mapActions("mentorStore", ["fetchAllReviews","fetchRatingAverage"]),
getDialogValue(val) {
this.dialog = val;
}
}
};
</script>
<style></style>
<file_sep><template>
<div style="background-color:#eee">
<div style="background-color:white;">
<v-layout row wrap class="modalHeader">
<v-flex xs12>
<slider :cardNo="cardNo" @sliderPosition="sliderPosition" />
</v-flex>
<v-flex xs12>
<testTitle class="pt-4" />
</v-flex>
<v-flex xs12 mx-2>
<cardContainer
class="pb-5"
:visibleCards="visibleCards"
@slidedQuestion="slidedQuestion"
:cardNo="cardNo"
/>
</v-flex>
</v-layout>
</div>
</div>
</template>
<script>
import { mapActions, mapState, mapGetters } from "vuex";
import slider from "@/components/local/careerTest/careerTest.slider.vue";
// import cardCareer from "@/components/local/careerTest/careerTest.cardCareer.vue"
import cardContainer from "@/components/local/careerTest/careerTest.cardContainer.vue";
import testTitle from "@/components/local/careerTest/careerTest.heading.vue";
export default {
components: {
slider,
cardContainer,
testTitle
// cardCareer
},
data: () => ({
cardNo: 1,
visibleCards: []
}),
async created() {
// console.log("I am created");
try {
const response = await this.fetchAllQuestion();
this.visibleCards = response;
} catch (error) {
console.log(error);
}
},
computed: {
...mapState("careerTestStore", ["questions"])
},
methods: {
...mapActions("careerTestStore", ["fetchAllQuestion"]),
slidedQuestion(e) {
this.cardNo = e;
},
sliderPosition(e) {
this.cardNo = e;
// console.log("careerText", this.cardNo);
// this.$emit("cardNumberFromSlider", this.cardNo);
}
}
};
</script>
<style scoped></style>
<file_sep><template>
<div style="background-color:white">
<v-layout row wrap>
<v-flex xs3 style="padding:10px" class="px-2 py-4">
<v-avatar size="85" >
<v-img :src="require('@/assets/user.png')"></v-img>
</v-avatar>
</v-flex>
<v-flex xs9 class="px-2 py-4">
<span>
<Strong style="color:#007799"><NAME></Strong>
</span>
<br />
<p>Student Of Daffodil International University</p>
<span>Career Goal : </span><span> Product Development</span>
<!-- <v-select dense :items="goals" :label="goals[0]"></v-select> -->
</v-flex>
</v-layout>
</div>
</template><file_sep><template>
<div>
<v-btn
bottom
fab
medium
block
:icon="true"
fixed
dark
color="#007790"
@click="confirmPayment"
>Pay now</v-btn
>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapActions, mapGetters } from "vuex";
export default {
props: ["session"],
data: () => ({
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
methods: {
...mapActions("sessionStore", ["saveConfirmPaymentFromMentee"]),
async confirmPayment() {
// console.log("hello", Date());
let updateData = {
sessionStatus: "Active",
paymentStatus: "Active"
};
const confirmPayment = new Date().toISOString();
try {
const response = await this.saveConfirmPaymentFromMentee({
confirmPaymentFromMentee: updateData,
sessionId: this.session._id,
updatedTime: confirmPayment
});
const dialogvalue = false;
this.$router.push({ name: "payconfirm" });
// console.log("hello hnny obasdf");
this.$emit("dialogValue", dialogvalue);
return response;
} catch (error) {
console.log(error);
}
// console.log(response);
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<style scoped>
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
/* width: 100%; */
}
</style>
<file_sep>var _ = require("lodash");
const FormatUtil = {
/**
   * format questions according to the format required by the component
* @param rawQuestions : raw questions array
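   * example (shapes inferred directly from this function's field accesses):
   *   in : { questionNo: 1, question: "…", answers: { optionA: { option: "…", isCorrect: false }, … } }
   *   out: { id: 1, question: "…", options: [{ no: "A", option: "…", status: false }, …] }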
*/
formatQuestions: function(rawQuestions) {
const rawQuestionArray = rawQuestions.slice();
let sortedRawQuestionArray = _.sortBy(rawQuestionArray, ["questionNo"]);
let formattedQuestions = [];
for (let index = 0; index < sortedRawQuestionArray.length; index++) {
let singleQuestion = sortedRawQuestionArray[index];
let formattedSingleQuestion = {
id: singleQuestion.questionNo,
question: singleQuestion.question,
options: [
{
no: "A",
option: singleQuestion.answers.optionA.option,
status: singleQuestion.answers.optionA.isCorrect
},
{
no: "B",
option: singleQuestion.answers.optionB.option,
status: singleQuestion.answers.optionB.isCorrect
},
{
no: "C",
option: singleQuestion.answers.optionC.option,
status: singleQuestion.answers.optionC.isCorrect
},
{
no: "D",
option: singleQuestion.answers.optionD.option,
status: singleQuestion.answers.optionD.isCorrect
}
]
};
formattedQuestions.push(formattedSingleQuestion);
}
return formattedQuestions;
}
};
export { FormatUtil };
<file_sep>import { state } from "./skillTest.states";
import { getters } from "./skillTest.getters";
import { actions } from "./skillTest.actions";
import { mutations } from "./skillTest.mutations";
export const skillTestStore = {
namespaced: true,
state,
getters,
actions,
mutations
};
<file_sep><template>
<div style="margin-bottom:20px">
<upperFilters :topics="topics" />
<h3 style="padding-left:15px">Popular Test</h3>
<!-- popular mobile view -->
<v-layout row wrap class="hidden-md-and-up">
<v-card class="myContainer" flat>
<!-- left to right slide in mobile view -->
<v-flex class="myColumn" v-for="test in tests" :key="test._id">
<testCard :test="test" />
</v-flex>
</v-card>
</v-layout>
<!-- Desktop view -->
<v-flex md12 px-1 class="hidden-sm-and-down">
<v-layout row wrap>
<v-flex md12 px-5>
<v-layout row wrap>
<v-flex md12 lg6 px-2 py-2 v-for="test in tests" :key="test._id">
<!-- mentors card for home page -->
<testCard :test="test" />
</v-flex>
</v-layout>
</v-flex>
</v-layout>
</v-flex>
<h3 style="padding-left:15px">Suggested Tests</h3>
<!-- suggested for Desktop-->
<v-flex md12 px-1 class="hidden-sm-and-down">
<v-layout row wrap>
<v-flex md12 px-5>
<v-layout row wrap>
<v-flex md12 lg6 px-2 py-2 v-for="test in tests" :key="test._id">
<!-- mentors card for home page -->
<testCard :test="test" />
</v-flex>
</v-layout>
</v-flex>
</v-layout>
</v-flex>
<!-- suggested for mobile-->
<v-layout row wrap class="hidden-md-and-up">
<v-card class="myContainer" flat>
<!-- left to right slide in mobile view -->
<v-flex class="myColumn" v-for="test in tests" :key="test._id">
<testCard :test="test" />
</v-flex>
</v-card>
</v-layout>
<!-- Authorized -->
<h3 style="padding-left:15px">Authorizers</h3>
<div v-for="authorizer in authorizers" :key="authorizer._id">
<authorizers :authorizer="authorizer"></authorizers>
</div>
<div style="margin:20px 10px 10px 10px">
<div v-for="test in tests" :key="test._id">
<infinitScrollList :test="test"></infinitScrollList>
</div>
</div>
</div>
</template>
<script>
import { mapActions, mapGetters } from "vuex";
import infinitScrollList from "@/components/local/skillTestListing/skillTestListing.infinitScrollList.vue";
import authorizers from "@/components/local/skillTestListing/skillTestListing.authorizersCard.vue";
import upperFilters from "@/components/local/skillTestListing/skillTestListing.filters.vue";
import testCard from "@/components/local/skillTestListing/skillTestListing.card.vue";
import { constants } from "crypto";
export default {
components: {
upperFilters,
testCard,
authorizers,
infinitScrollList
},
data: () => ({
topics: ["Development", "UI/UX", "JavaScript", "HTML5"],
tests: [],
authorizers: [],
type: ""
}),
created() {
this.type = "FilteredTests";
this.fetchAllTests({ type: this.type })
.then(res => {
console.log("filtered tests: ", res);
this.tests = res;
})
.catch(err => {
this.tests = [];
console.log(err);
});
this.fetchAllAuthorizers()
.then(res => {
console.log("users", res);
this.authorizers = res;
})
.catch(err => {
this.authorizers = [];
console.log(err);
});
},
methods: {
...mapActions("skillTestStore", ["fetchAllTests", "fetchAllAuthorizers"])
}
};
</script>
<style>
@import "../../assets/styles/home.css";
@media screen and (min-width: 1055px) {
.myContainer {
display: -webkit-flex;
flex-direction: column;
-webkit-flex-direction: column;
overflow-y: scroll;
}
}
.myColumn {
margin: 20px;
-webkit-flex: 1 1 auto;
max-width: 400px;
}
@media screen and (max-width: 1055px) {
.myContainer {
white-space: nowrap;
display: -webkit-flex;
flex-direction: row;
-webkit-flex-direction: row;
overflow-y: scroll;
}
::-webkit-scrollbar {
display: none;
}
}
</style><file_sep>export const state = {
saveMenteeReviewOfSession: [],
saveAllReviewsFromMentorsAgainstMentee: []
};
<file_sep><template>
<div style="background-color:#eee" class="mb-4">
<!-- mobile layout -->
<v-layout
row
wrap
class="hidden-md-and-up"
style="background-color:#EEE;border-bottom-left-radius:25px;border-bottom-right-radius:25px"
>
<v-flex xs12 mt-3 mx-2 text-xs-center>
<v-select
solo
class="roundcombobox combobox"
v-model="model"
:items="items"
label="Interest/Industry"
>
</v-select>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12 mb-1>
<careerMatches />
</v-flex>
<v-flex xs12 mb-1>
<resultExpansion />
</v-flex>
</v-layout>
</div>
</template>
<script>
import careerMatches from "@/components/local/careerTestResult/careerTestResult.careerMatches";
import resultExpansion from "@/components/local/careerTestResult/careerTestResult.expansion";
export default {
data: () => ({
model: "",
items: [
"Technology Companies",
"Garments Industries",
"Chemicel Industries"
]
}),
components: {
careerMatches,
resultExpansion
}
};
</script>
<style>
.combobox.v-autocomplete__content.v-menu__content .v-card {
align-items: center;
display: flex;
flex-direction: column;
}
.combobox.v-text-field.v-text-field--solo .v-label {
left: 30% !important;
}
.roundcombobox.v-text-field.v-text-field--solo:not(.v-text-field--solo-flat)
> .v-input__control
> .v-input__slot {
border-radius: 25px;
}
.autoComplete.theme--light.v-text-field--solo
> .v-input__control
> .v-input__slot {
background: #eee;
}
</style>
<file_sep>export const mutations = {
saveUserBasicInfoById: (state, payload) => {
state.userBasicInfoById = payload;
},
saveUserSessionCountById: (state, payload) => {
state.userSessionCountById = payload;
},
saveAllMessages: (state, payload) => {
state.allMessages = payload;
},
saveAllNotifications: (state, payload) => {
state.allNotifications = payload;
},
saveUserDetailInfoById: (state, payload) => {
state.userDetailInfoById = payload;
},
saveAllSkills: (state, payload) => {
state.allSkills = payload;
}
};
<file_sep><template>
<div style="overflow:hidden">
<v-img :src="require('@/assets/authBackground.png')" position="top"></v-img>
<div class="curvyDiv">
<loginFormCarrier class="absoluteForm" />
</div>
</div>
</template>
<script>
// NOTE: loginFormCarrier was referenced without an import; the path below is an
// assumption based on this repo's component layout.
import loginFormCarrier from "@/components/local/auth/auth.loginFormCarrier";
export default {
components: {
loginFormCarrier
}
};
</script>
<style>
.curvyDiv {
min-height: 100%;
background-color: #007799;
overflow: hidden;
}
.absoluteForm {
position: absolute;
margin-left: -75px;
top: 47%;
left: 20%;
bottom: 0%;
}
</style>
<file_sep><template>
<!-- mobile layout -->
<div style="background-color:white;">
<v-layout row wrap class="modalHeader">
<v-img
:src="require('@/assets/paymentComplete.png')"
position="right center"
class="absoluteImageStyle"
></v-img>
<v-flex class="itemCenterBottom" xs12>
<p style="text-align:center;color:white">
Your payment
<br />have been successfully done
</p>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12 py-4 my-4>
<basic />
</v-flex>
<v-flex xs12 mx-2 pt-4 my-2>
<actionButtons class="itemCenterBottom" />
</v-flex>
</v-layout>
</div>
</template>
<script>
import basic from "@/components/local/payConfirm/payConfirm.basic";
import actionButtons from "@/components/local/payConfirm/payConfirm.actionButtons";
export default {
components: {
basic,
actionButtons
}
};
</script>
<style scoped>
.absoluteImageStyle {
  position: absolute;
left: 50%;
top: 4%;
margin-left: -40px;
min-width: 85px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
min-height: 85px;
}
.itemCenterBottom {
display: flex;
justify-content: flex-end;
flex-direction: column;
}
.modalHeader {
height: 180px;
background-color: #007799;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep><template>
<div>
<swiper :options="swiperOption" ref="mySwiper" class="swiperClass">
<swiper-slide v-for="(card, index) in visibleCards" :key="index">
<queCard
swiper="swiper-button-next"
@nextSlide="nextSlide"
:answers="answers"
:card="card"
:index="index"
></queCard>
</swiper-slide>
</swiper>
<!-- <v-carousel>
<v-carousel-item
v-for="(item,i) in items"
:key="i"
:src="item.src"
></v-carousel-item>
</v-carousel>-->
<v-btn v-if="cardEnd" color="success" elevation-0 @click="dialog = true">Save Results</v-btn>
<v-dialog v-if="dialog" v-model="dialog" hide-overlay transition="slide-x-transition">
<divisionSelector :result="result"></divisionSelector>
</v-dialog>
</div>
</template>
<script>
import divisionSelector from "./careerTest.divisionSelectModal";
import queCard from "./careerTest.card";
// import "swiper/dist/css/swiper.css";
import { swiper, swiperSlide } from "vue-awesome-swiper";
// var mySwiper = new Swiper('.swiper-container', {
// });
export default {
props: ["visibleCards", "cardNo"],
components: {
swiper,
swiperSlide,
queCard,
divisionSelector
},
data: () => ({
dialog: false,
isThrowOut: 60,
cardEnd: false,
currentNumber: 0,
answers: [
{ text: "R", value: 0 },
{ text: "I", value: 0 },
{ text: "A", value: 0 },
{ text: "S", value: 0 },
{ text: "E", value: 0 },
{ text: "C", value: 0 }
],
result: { R: 0, I: 0, A: 0, S: 0, E: 0, C: 0 },
    // slide-change handling is bound in mounted() via swiper.on("slideChange", …);
    // the legacy onSlideChangeEnd callback that was here had an unbound `this`
    // and never reached the component, so it is dropped
    swiperOption: {}
}),
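  // keep the card deck in sync with the external slider: a new cardNo from the
  // slider slides the swiper to that index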
watch: {
cardNo(val) {
this.$refs.mySwiper.swiper.slideTo(val);
// console.log('hey hey',val)
}
},
computed: {
swiper() {
      // console.log(this.$refs.mySwiper);
return this.$refs.mySwiper.swiper;
}
},
mounted() {
this.swiper.on("slideChange", () => {
this.onSwipe(this);
});
// console.log("hello",this.$refs.mySwiper)
},
methods: {
nextSlide() {
this.$refs.mySwiper.swiper.slideNext();
},
questionNumber(el) {
// console.log(el);
this.$emit("questionNumber", el);
},
answerslist(el) {
this.answers = el;
// console.log("from career", el);
},
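    // RIASEC scoring: each of the six answer buckets appears to cap at 10
    // questions (60 cards / 6 types), so value * 100 / 10 turns a raw count
    // into a percentage for the result payload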
onSwipe(variable) {
this.result.R = (this.answers[0].value * 100) / 10;
this.result.I = (this.answers[1].value * 100) / 10;
this.result.A = (this.answers[2].value * 100) / 10;
this.result.S = (this.answers[3].value * 100) / 10;
this.result.E = (this.answers[4].value * 100) / 10;
this.result.C = (this.answers[5].value * 100) / 10;
// console.log(variable);
this.$emit("slidedQuestion", variable.swiper.activeIndex);
if (variable.swiper.activeIndex === 59) {
this.cardEnd = true;
}
}
}
};
</script>
<style scoped>
@media screen and (max-height: 500px) {
.swiperClass {
margin-bottom: 80px;
height: 360px;
}
}
</style>
<file_sep><template>
<!-- textfield dynamic container -->
<!-- if plusIcondisabled true this component will show -->
<v-layout row wrap v-show="plusIcondisabled">
<h5>Education information : 1</h5>
<v-flex xs12>
<v-text-field
:label="addComponentLabel[0]"
v-model="firstTextFirst"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[1]"
v-model="secondTextFirst"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[2]"
v-model="thirdTextFirst"
required
></v-text-field>
</v-flex>
<h5>Education information : 2</h5>
<v-flex xs12>
<v-text-field
:label="addComponentLabel[0]"
v-model="firstTextSecond"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[1]"
v-model="secondTextSecond"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[2]"
v-model="thirdTextSecond"
required
></v-text-field>
</v-flex>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</v-layout>
</template>
<script>
export default {
// props come from prouctUpload.vue
props: ["plusIcondisabled", "addComponentLabel", "addComponentPlaceholder"],
data() {
return {
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: "",
addComponentModelName: ""
};
},
watch: {
firstTextFirst() {
this.$emit("firstTextFirst", this.firstTextFirst);
},
    secondTextFirst() {
      this.$emit("secondTextFirst", this.secondTextFirst);
    },
thirdTextFirst() {
this.$emit("thirdTextFirst", this.thirdTextFirst);
},
firstTextSecond() {
this.$emit("firstTextSecond", this.firstTextSecond);
},
secondTextSecond() {
this.$emit("secondTextSecond", this.secondTextSecond);
},
thirdTextSecond() {
this.$emit("thirdTextSecond", this.thirdText);
}
},
methods: {
// canceladd will call when delete button will click it will remove this component
// it will make false plusIcondisabled
cancelAdd() {
var vm = this;
this.plusIcondisabled = false;
// console.log(
// "I am check plusIcon Desabled from dynamic create" +
// this.plusIcondisabled
// );
vm.$emit("input", this.plusIcondisabled);
},
// it will send addComponentModelName value to parent to create new
sendData() {
var vm = this;
// console.log(
// "check component text filed value come" + this.addComponentModelName
// );
this.plusIcondisabled = false;
try {
// console.log(this.firstText);
// vm.$emit("secondText", this.secondText);
// vm.$emit("thirdText", this.thirdText);
// vm.$emit("input", this.plusIcondisabled);
// console.log("passed to parent");
} catch (error) {
        console.log(
"hello error",
this.firstText,
this.secondText,
this.thirdText
);
}
this.addComponentModelName = "";
}
}
};
</script>
<file_sep><template>
<div>
<v-img
:src="require('@/assets/paymentSlipCoin.png')"
position="right center"
class="absoluteImageStyle"
></v-img>
</div>
</template>
<style scoped>
.absoluteImageStyle {
  position: absolute;
left: 50%;
top: 4%;
margin-left: -40px;
min-width: 85px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
min-height: 85px;
}
.absoluteTotalPayableBox {
background-color: white;
position: absolute;
  position: absolute;
top: 20%;
margin-left: -55px;
min-width: 110px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
border-bottom-left-radius: 15px;
border-bottom-right-radius: 15px;
/* min-height: 110px; */
z-index: 999;
padding: 20px;
width: 80%;
}
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 2px;
}
</style>
<file_sep><template>
<div class="px-2" style="background-color:white">
<h3>Test Result</h3>
<v-layout row wrap v-for="(result,i) in testResult" :key="i">
<v-flex xs7 style="padding-right:5px">
<p><strong>Name</strong></p>
<span>{{result.name}}</span>
</v-flex>
<v-flex xs2>
<p><strong>Score</strong></p>
<span >{{result.score}}</span>
</v-flex>
<v-flex xs3>
<p><strong>Display on<br> profile</strong></p>
<v-switch
v-model="result.isDisplayOnProfile"
></v-switch>
</v-flex>
</v-layout>
</div>
</template>
<script>
export default {
data:()=>({
testResult:[
{
name:"HTML 5:An Introduction to web Design",
score:50,
isDisplayOnProfile:true,
},
{
name:"CSS 3:An Introduction to CSS",
score:34,
isDisplayOnProfile:false,
}
]
})
}
</script>
<style>
</style>
<file_sep><template>
<div style="padding-left:20px">
<span>
      <Strong>Receipt</Strong>
</span>
<br />
<v-layout class="px-3" row wrap>
<v-flex xs6>
<v-card-text style="font-size:13px;text-align:left">
<span>Base Fee</span>
<br />
          <span>Professional Fee</span>
<br />
<span>Refreshment Fee</span>
<br />
<span>Total Fee</span>
<br />
</v-card-text>
</v-flex>
<v-flex xs3></v-flex>
<v-flex xs3>
<v-card-text style="font-size:13px;text-align:left">
<span>500</span>
<br />
<span>1500</span>
<br />
<span>500</span>
<br />
<span>2500</span>
<br />
</v-card-text>
</v-flex>
</v-layout>
</div>
</template>
<file_sep>export const getters = {
getSessionDetails: state => {
return state.sessionDetails;
},
getActiveSession: state => {
return state.activeSession;
},
getPendingSession: state => {
return state.pendingSession;
},
getCompleteSession: state => {
return state.completeSession;
},
getSessionDashboardSummery: state => {
return state.sessionDashboardSummery;
},
getSessionListById: state => {
return state.sessionListById;
}
};
<file_sep><template>
<v-card
class="elevation-1 mentorCardStyle"
style="border-radius:15px;background-color:#ECEFF1;
"
flat
>
<v-img
:src="getImgUrl(mentorBasic.imageUrl)"
:aspect-ratio="ratio"
class="imgCardsize"
contain
v-model="ratio"
>
<v-container fill-height pa-0>
<v-list style=" background-color:transparent;">
<v-list-tile v-for="(activity, i) in mentorBasic.services" :key="i" class="listTile">
<v-list-tile-title>
<p style="color:#007790" class="subText text-xs-center" v-text="activity.name"></p>
</v-list-tile-title>
</v-list-tile>
</v-list>
</v-container>
</v-img>
<v-card-text style="text-align:center">
<span class="titleText lineHeight" style="color:#007790">{{ mentorBasic.name }}</span>
<br />
<span class="subText lineHeight" style=" white-space: normal;">
<strong>{{ mentorBasic.designation }} at {{ mentorBasic.company }}</strong>
</span>
<br />
<span class="subText">
<strong>{{ mentorBasic.address }}</strong>
</span>
<br />
<v-layout row wrap py-2 style="color:#007790;text-align:center">
<v-flex md12 text-xs-right text-sm-center>
<v-rating
readonly
v-model="mentorBasic.mentorRating"
background-color="grey lighten-3"
color="grey"
size="10"
half-increments
></v-rating>
</v-flex>
<v-flex md12 text-xs-left text-sm-center>
<span class="subText">
            {{ mentorBasic.mentorRating }} ({{ mentorBasic.reviews }} reviews)
</span>
</v-flex>
</v-layout>
<div style="height:70px">
<span class="subText">Skill:</span>
<span
class="subText"
v-for="(skill, i) in mentorBasic.skills.slice(0, 3)"
:key="i"
style="white-space: pre-wrap;"
>{{ skill.name }},</span>
</div>
</v-card-text>
<v-layout row wrap style=" ">
<v-flex xs6 class="centerItem">
<span style="font-weight: bold">
<span
style="text-decoration:line-through; color:#007790;"
>{{ mentorBasic.hourlyRate }} Tk/Sessions</span>
<br />FREE
</span>
<br />
</v-flex>
<v-flex xs6 style="border-bottom-right-radius:15px">
<v-btn
id="mentor-home-connect-btn"
large
block
class="elevation-0 titleText"
dark
color="#007790"
style="margin:0px;border-bottom-right-radius:15px"
@click.stop="dialog = true"
>Connect</v-btn>
<v-dialog
v-if="dialog"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<v-btn
id="mentor-home-details-connect-btn"
bottom
fab
medium
block
@click="checkIsLoggedIn"
:icon="true"
fixed
dark
color="#007790"
>Connect</v-btn>
<!-- bottom sheet will open with a date picker -->
<v-bottom-sheet v-model="sheet">
<!-- bottom sheet activator template with activator-->
<!-- <template v-slot:activator> -->
<!-- </template> -->
<!-- date picker component-->
<mentorBookScheduling :mentor="mentorBasic" />
</v-bottom-sheet>
<!-- mentor profile modal component-->
<mentorProfileModal
:mentorBasic="mentorBasic"
:sheet="sheet"
@sendDialogValue="getDialogValue"
style="margin-bottom:50px;top:0"
/>
</v-dialog>
</v-flex>
<v-dialog
v-model="dialogLogin"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<auth
@backToPage="getAuthDialogValue"
@closeModalFromLogin="closeModalFromLogin"
@closeModalFromRegistration="closeModalFromRegistration"
/>
<!-- <h1>asdfasfaf</h1> -->
<!-- <login/> -->
</v-dialog>
</v-layout>
</v-card>
</template>
<script>
import { mapActions, mapGetters, mapState } from "vuex";
import auth from "@/views/modalView/auth";
import mentorBookScheduling from "@/components/local/mentorProfileModal/mentorProfileModal.scheduling";
import mentorProfileModal from "@/views/modalView/mentorProfileModal";
export default {
props: ["mentorBasic"],
components: {
auth,
mentorProfileModal,
mentorBookScheduling
},
data: () => ({
dialog: false,
sheet: false,
dialogLogin: false,
    ratio: 2 // changeable according to screen height
}),
mounted() {
//measure screen height and set ratio value
if (window.matchMedia("screen and (max-height: 736px)").matches) {
this.ratio = 2;
}
},
computed: {
...mapState("authStore", ["isAuthenticate"])
},
methods: {
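    // build an absolute URL for an image served from the API's /static route,
    // falling back to an empty string when the mentor has no image set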
getImgUrl(img) {
try {
// console.log("image: ", img);
if (!img || img === undefined) {
return "";
} else {
return process.env.VUE_APP_ROOT_API + "static/" + img;
}
} catch (error) {
console.log(error);
}
},
checkIsLoggedIn() {
if (this.isAuthenticate === true) {
this.sheet = true;
this.dialogLogin = false;
} else {
this.sheet = false;
this.dialogLogin = true;
}
},
closeModalFromLogin(val) {
this.dialogLogin = val;
},
closeModalFromRegistration(val) {
this.dialogLogin = val;
},
getAuthDialogValue(val) {
this.dialogLogin = val;
},
//getting a value which is sending from mentorProfileModal
getDialogValue(valueFromChild) {
this.dialog = valueFromChild;
}
}
};
</script>
<style scoped>
@import "../../assets/styles/home.css";
@media screen and (min-width: 1640px) {
.imgCardsize {
max-width: 400px;
min-width: 300px;
}
}
@media screen and (max-width: 1640px) and (min-width: 1055px) {
.imgCardsize {
max-width: 300px;
min-width: 200px;
}
}
@media screen and (max-width: 1055px) {
.imgCardsize {
max-width: 400px;
min-width: 300px;
}
}
.centerItem {
justify-content: center;
display: flex;
align-items: center;
}
.mentorCardStyle {
/* height: 350px */
}
.listTile >>> .v-list__tile {
padding: 0px 0px !important;
height: 28px !important;
}
.v-list__tile__title {
padding: 0px 5px 1px 5px;
padding-left: 5px;
background-color: white;
border-top-right-radius: 10px;
border-bottom-right-radius: 10px;
}
.v-dialog__container {
display: -webkit-box !important;
vertical-align: middle;
}
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
width: 100%;
}
</style>
<file_sep><template>
<div style="background-color:white;text-align:center" class="py-2">
<span style="color:#007790;">
<strong>Service Offered</strong>
</span>
<v-layout row wrap>
<v-flex xs1></v-flex>
<v-flex xs3 class="rightItems">
<div class="circleServiceIcon">
<v-img
:width="30"
:height="30"
:src="require('@/assets/like.png')"
></v-img>
<br />
</div>
</v-flex>
<v-flex xs4 class="centerItems">
<div class="circleServiceIcon">
<v-img
:width="30"
:height="30"
:src="require('@/assets/like.png')"
></v-img>
</div>
</v-flex>
<v-flex xs3 class="leftItems">
<div class="circleServiceIcon">
<v-img
:width="30"
:height="30"
:src="require('@/assets/like.png')"
>
</v-img>
</div>
</v-flex>
<v-flex xs1></v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs1></v-flex>
<v-flex xs3 class="rightItems">
<div class="centerItems">
<span class="caption">
{{mentorBasic.services[0].name}}
</span>
</div>
</v-flex>
<v-flex xs4 class="centerItems">
<span class="caption">
{{mentorBasic.services[1].name}}
</span>
</v-flex>
    <v-flex xs3 class="leftItems">
      <div class="centerItems pr-4">
        <span class="caption" v-if="mentorBasic.services[2]">
          {{ mentorBasic.services[2].name }}
        </span>
</div>
</v-flex>
<v-flex xs1></v-flex>
</v-layout>
</div>
</template>
<script>
export default {
props:[
'mentorBasic','mentorAdditionalData'
],
data:()=>({
isDataExist:false,
}),
  created(){
    // console.log('check services', this.mentorBasic.services);
  }
};
</script>
<style scoped>
.circleServiceIcon {
background-color: #eee;
width: 40px;
height: 40px;
padding: 10px;
border-radius: 50%;
align-items: center;
display: flex;
justify-content: center;
}
.centerItems {
align-items: center;
display: flex;
justify-content: center;
}
.leftItems {
align-items: left;
display: flex;
justify-content: flex-start;
}
.rightItems {
align-items: center;
display: flex;
justify-content: flex-end;
}
</style>
<file_sep><template>
<div>
<div style="background-color:white;padding:10px">
<p>
<Strong>Rate this session</Strong>
</p>
<span>Help CareerKi serve you better</span>
<br>
<span>It will take no more than 5 seconds</span>
<br>
<div class="startRatingPosition">
<v-rating readonly v-model="rating" size="30" color="#3B4042" background-color="#3B4042"></v-rating>
</div>
</div>
</div>
</template>
<script>
export default {
data: () => ({
rating: 0
})
};
</script>
<style >
.startRatingPosition {
display: flex;
justify-content: center;
padding: 5px;
}
</style><file_sep>/* eslint-disable no-console */
import MenteeService from "../../../service/mentee.service";
export const actions = {
// get user review of a session
async fetchMenteeReviewOfSession({ commit }, { user_id, session_id }) {
try {
const response = await MenteeService.fetchMenteeReviewOfSession(
user_id,
session_id
);
commit("saveMenteeReviewOfSession", response);
return response;
} catch (error) {
console.log(error);
}
},
// get all review come from mentors against a user
async fetchAllReviewsFromMentorsAgainstMentee({ commit }, { user_id }) {
try {
const response = await MenteeService.fetchAllReviewsFromMentorsAgainstMentee(
user_id
);
commit("saveAllReviewsFromMentorsAgainstMentee", response);
return response;
} catch (error) {
console.log(error);
}
}
};
<file_sep>/* eslint-disable no-console */
import MentorService from "../../../service/mentor.service";
export const actions = {
async fetchRandomMentorData({ commit }) {
try {
const response = await MentorService.fetchRandomMentorData();
commit("saveRandomMentorData", response);
return response;
} catch (error) {
console.log(error);
}
},
// fetch a mentor's basic data by his id (api no 3)
async fetchMentorBasicDataById({ commit }, { mentor_id }) {
try {
const response = await MentorService.fetchMentorBasicDataById(mentor_id);
commit("saveMentorBasicData", response);
return response;
} catch (error) {
console.log(error);
}
},
// fetch a mentor's details data by his id (api no 4)
async fetchMentorDetailsDataById({ commit }, { mentor_id }) {
try {
const response = await MentorService.fetchMentorDetailsDataById(
mentor_id
);
commit("saveMentorDetailsData", response);
return response;
} catch (error) {
console.log(error);
}
},
// summary count of sessions of a mentor (api no 8)
async fetchMentorSessionCountById({ commit }, { mentor_id }) {
try {
const response = await MentorService.fetchMentorSessionCountById(
mentor_id
);
commit("saveMentorSessionCount", response);
return response;
} catch (error) {
console.log(error);
}
},
// get all reviews of Mentees agains a mentor (api no 12)
async fetchAllReviews({ commit }, { _id, mode, reviews_from }) {
try {
const response = await MentorService.fetchAllReviews(
_id,
mode,
reviews_from
);
commit("saveAllReviews", response);
return response;
} catch (error) {
console.log(error);
}
},
async fetchRatingAverage({ commit }, { _id, mode, reviews_from }) {
try {
const response = await MentorService.fetchRatingAverage(
_id,
mode,
reviews_from
);
commit("saveRatingAverage", response);
return response;
} catch (error) {
console.log(error);
}
}
};
<file_sep><template>
<v-card class="pagenotready px-1 py-2">
<v-img
:src="require('@/assets/construction.png')"
aspect-ratio="1"
contain
:height="200"
:width="200"
></v-img>
<v-divider></v-divider>
<v-card-text>
<h4 style="color:orangered">
This page is now <br />
        under construction
</h4>
</v-card-text>
<v-divider></v-divider>
<v-card-actions>
<v-spacer></v-spacer>
</v-card-actions>
</v-card>
</template>
<script>
export default {
methods: {}
};
</script>
<style>
.pagenotready {
align-items: center;
text-align: center;
/* justify-content: center; */
display: flex;
flex-direction: column;
}
</style>
<file_sep>import { state } from "./auth.states";
import { getters } from "./auth.getters";
import { actions } from "./auth.actions";
import { mutations } from "./auth.mutations";
export const authStore = {
namespaced: true,
state,
getters,
actions,
mutations
};
<file_sep><template>
<div>
<v-card flat class="pt-5">
<v-card-text style="font-size:12px">
<span>Date of Payment : {{ session.paymentTime }}</span>
<br />
<v-divider light class="my-1"></v-divider>
<span>Service type : Session with professional</span>
<br />
<v-divider class="my-1"></v-divider>
<span>Date of Session : {{ session.sessionModifiedStartTime }}</span>
<br />
<v-divider class="my-1"></v-divider>
<span>Services :</span>
<span v-for="(service, i) in session.serviceOffered" :key="i">
{{ service.name }}</span
>
<br />
<v-divider class="my-1"></v-divider>
<span>Session Duration : 1.5 hours</span>
<br />
<v-divider class="my-1"></v-divider>
<span>Location : {{ session.sessionRequestedSpot }} </span>
<br />
<v-divider class="my-1"></v-divider>
</v-card-text>
</v-card>
</div>
</template>
<script>
export default {
props: ["session"]
};
</script>
<file_sep><template>
<div>
<v-layout row wrap>
<h5>Education information : 1</h5>
<v-flex xs12>
<v-text-field
:label="addComponentLabel[0]"
v-model="firstEduCourse"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[1]"
v-model="firstEduSession"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[2]"
v-model="firstEduUniversity"
required
></v-text-field>
</v-flex>
<h5>Education information : 2</h5>
<v-flex xs12>
<v-text-field
:label="addComponentLabel[0]"
v-model="secondEduCourse"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[1]"
v-model="secondEduSession"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[2]"
v-model="secondEduUniversity"
required
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12>
<v-btn dark color="#007799" @click="updateProfile">update</v-btn>
</v-flex>
</v-layout>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapActions } from "vuex";
import { UserInfoService } from "@/service/storage.service";
export default {
props: ["userDetails"],
data: () => ({
userId: UserInfoService.getUserID(),
course: "",
session: "",
university: "",
firstEduCourse: "",
firstEduSession: "",
firstEduUniversity: "",
secondEduCourse: "",
secondEduSession: "",
secondEduUniversity: "",
addComponentLabel: ["Course Name", "Session Range", "University Name"],
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
methods: {
...mapActions("commonUserStore", ["saveUpdateProfile"]),
async updateProfile() {
      // map each form field to its matching schema key; the original assignment
      // crossed course/session/university
      var education = [
        {
          subject: this.firstEduCourse,
          duration: this.firstEduSession,
          university: this.firstEduUniversity
        },
        {
          subject: this.secondEduCourse,
          duration: this.secondEduSession,
          university: this.secondEduUniversity
        }
      ];
this.dynamicComponent = true;
// console.log("asdf");
let userData = {
education: education
};
// console.log(userData);
// console.log("my userId", this.userId);
try {
const response = await this.saveUpdateProfile({
user_data: userData,
user_id: this.userId
});
// console.log("check my response ", response);
if (!response) {
this.showAlert("Profile Update Failed!", "error", "top");
} else {
this.showAlert("Profile Update Successful!", "success", "top");
var getDialogValue = false;
this.$emit("setDialogValue", getDialogValue);
}
return response;
} catch (error) {
console.log(error);
return null;
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<file_sep><template>
<div style="background-color:white;">
<h3 style="padding-left:10px">Achievements</h3>
<v-layout row wrap>
<v-flex xs4 v-for="i in 3" :key="i" class="imgCenter">
<v-img class="imgScaling" :src="require('@/assets/icons/skilltestProfile/badge.png')"></v-img>
</v-flex>
</v-layout>
</div>
</template>
<style >
.imgCenter {
padding:10px 0px 10px 0px;
display: flex;
justify-content: center;
}
.imgScaling {
max-width: 64px;
min-width: 50px;
}
</style><file_sep><template>
<div style=" text-align:center ;background-color:white;padding-top:5px">
<span>
{{ menteeDetailInfo[0].bio }}
</span>
<br />
<v-btn flat color="#007790">See More</v-btn>
</div>
</template>
<script>
export default {
props: ["menteeDetailInfo"],
async created() {},
mounted() {},
computed: {},
methods: {}
};
</script>
<file_sep><template>
<div style="background-color:white">
<!-- mobile layout -->
<div style="background-color:white;">
<v-layout row wrap class="modalHeader">
<v-btn dark icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-layout>
<basic />
</div>
<div class="mb-1">
<sessionInfo :session="session" />
</div>
<div class="mb-1">
<moneyReciept />
</div>
<div class="mb-1">
<mentorInfo :session="session" />
</div>
</div>
</template>
<script>
import basic from "@/components/local/paymentSlip/paymentSlip.basic";
import mentorInfo from "@/components/local/paymentSlip/paymentSlip.mentorInfo";
import sessionInfo from "@/components/local/paymentSlip/paymentSlip.sessionInfo";
import moneyReciept from "@/components/local/paymentSlip/paymentSlip.moneyReciept";
// api
import { UserInfoService } from "@/service/storage.service";
import { mapActions, mapGetters } from "vuex";
export default {
props: ["session"],
data: () => ({
userId: UserInfoService.getUserID()
}),
components: {
basic,
mentorInfo,
sessionInfo,
moneyReciept
},
created() {
// var userId='5cbc5e1fd7422d09ec7ec20e';
var sessionId = 1;
this.fetchSessionDetailsById({
user_id: this.userId,
session_id: sessionId
});
},
computed: {
...mapGetters("sessionStore", ["getSessionDetails"])
},
methods: {
...mapActions("sessionStore", ["fetchSessionDetailsById"]),
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
}
}
};
</script>
<style scoped>
.modalHeader {
height: 180px;
background-color: #007799;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep><template>
<div
style="justify-content: center;
align-items: center;
display: flex;
background-color:white;
"
>
<v-card class="absoluteTotalPayableBox">
<v-layout row wrap>
<v-flex xs6>
<p style="font-size:12px; margin-bottom: 4px;">Total Paid</p>
<h2 style="color:#007799">BDT 2,500</h2>
</v-flex>
<v-flex xs6 style="text-align:right">
<span>Invoice ID</span>
<br />
<span># 007799</span>
</v-flex>
</v-layout>
</v-card>
</div>
</template>
<style scoped>
.absoluteTotalPayableBox {
background-color: white;
justify-content: center;
/* position: absolute;
position: absolute; */
/* left: 25%;
top: 50%; */
/* margin-left: -55px;
margin-top:-50px; */
min-width: 110px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
border-bottom-left-radius: 15px;
border-bottom-right-radius: 15px;
/* min-height: 110px; */
/* z-index: 999; */
padding: 20px;
width: 80%;
}
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 2px;
}
</style>
<file_sep>export const state = {
appTitle: "Quizards",
isProfile: false,
userID: "",
loading: 0,
UserInfo: []
};
<file_sep><template>
<div>
<div class="calenderTitleBar">
<span class="subheading font-weight-medium" style="color:white">Schedule Booking</span>
</div>
<v-card flat>
<v-layout>
<v-flex
style="justify-content:center;align-items:center;display:flex;background-color: #eee"
>
<v-date-picker
v-model="selectedDate"
:allowed-dates="allowedDates"
class="elevation-0 my-date-picker"
min="2019-05-1"
max="2019-08-30"
no-title
></v-date-picker>
</v-flex>
</v-layout>
<div>
<v-layout row wrap style="background-color:#eee;margin-bottom:30px">
<v-flex xs6 offset-md3 md3 class="px-4 pb-2">
<v-select
class="my-input"
flat
:height="5"
dense
v-model="time"
:items="timeArray"
label="Select time*"
solo
></v-select>
</v-flex>
<v-flex xs6 md3 class="px-4 pb-2">
<v-select
class="my-input"
flat
dense
v-model="location"
:height="5"
:items="spot"
label="Select a spot*"
solo
></v-select>
</v-flex>
</v-layout>
</div>
</v-card>
<v-btn
id="modal-final-connect-btn"
bottom
fab
small
block
:icon="true"
fixed
dark
color="#007790"
@click="sessionRequestSend"
>Confirm Booking Request</v-btn>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
<v-dialog v-model="confirmatinDialog">
<v-card>
<v-card-text>Your request is sent</v-card-text>
<v-card-actions>
<v-btn color="blue darken-1" flat @click="confirmatinDialogClose">Ok</v-btn>
</v-card-actions>
</v-card>
</v-dialog>
</div>
</template>
<script>
// import FormDataPost from "@/service/session.service";
import { UserInfoService } from "@/service/storage.service";
import { mapActions, mapGetters } from "vuex";
import { setTimeout } from "timers";
export default {
props: ["mentor"],
components: {},
data: () => ({
confirmatinDialog: false,
userId: UserInfoService.getUserID(),
location: "",
time: "",
mentorAdditionalData: [],
spot: [],
//at start selectedDate will carry todays date
selectedDate: "",
items: ["9.00 P.M", "10.00 A.M"],
schedule: [],
timeArray: [],
arrayOfDates: [],
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
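  // when the picked date changes, find its index in the mentor's schedule and
  // swap in that day's time slots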
watch: {
selectedDate() {
var dateIndex = this.arrayOfDates.indexOf(this.selectedDate);
// console.log(dateIndex);
// this.timeIndex = dateIndex
this.timeArray = this.schedule[dateIndex].timeSlots;
// console.log();
}
},
async created() {
//sending request to fetchMentorSessionCountById(mentor_id) action
this.mentorAdditionalData = await this.fetchMentorDetailsDataById({
mentor_id: this.mentor._id
});
this.spot = this.mentorAdditionalData[0].mentoringPlaces;
// console.log(this.spot);
// console.log(this.mentorAdditionalData[0].schedules);
this.schedule = this.mentorAdditionalData[0].schedules;
var arrayOfDates = [];
var today = new Date();
var dd = String(today.getDate()).padStart(2, "0");
var mm = String(today.getMonth() + 1).padStart(2, "0"); //January is 0!
var yyyy = today.getFullYear();
today = yyyy + "-" + mm + "-" + dd;
//at start selectedDate will carry todays date
this.selectedDate = today;
// document.write(today);
// console.log("i am", this.Today);
    // normalize each schedule entry to the YYYY-MM-DD format the date picker
    // expects; the original concatenation hardcoded a leading 0 on the month
    // (breaking Oct–Dec) and never padded the day
    this.schedule.forEach(function(entry) {
      var d = new Date(entry.date);
      var fullYear = d.getFullYear();
      var fullMonth = String(d.getMonth() + 1).padStart(2, "0");
      var fullDate = String(d.getDate()).padStart(2, "0");
      arrayOfDates.push(fullYear + "-" + fullMonth + "-" + fullDate);
    });
this.arrayOfDates = arrayOfDates;
this.selectedDate = arrayOfDates[0];
var dateIndex = this.arrayOfDates.indexOf(this.selectedDate);
this.time = this.schedule[dateIndex].timeSlots[0];
this.location = this.spot[0];
},
computed: {
// get values to direct use frontend
...mapGetters("mentorStore", ["getMentorDetailsData"])
},
methods: {
//map which actions will need
...mapActions("sessionStore", ["savePendingSessionRequest"]),
...mapActions("mentorStore", ["fetchMentorDetailsDataById"]),
allowedDates(val) {
return this.arrayOfDates.indexOf(val) !== -1;
},
async sessionRequestSend() {
// TODO: Make Invoice id
var invoiceId = Math.floor(Math.random() * 10000000 + 1);
const requestUpdatedTime = new Date().toISOString();
let postData = {
invoiceId: invoiceId,
sessionStatus: "Pending",
userId: this.userId,
mentorId: this.mentor._id,
sessionRequestedSpot: this.location,
paymentStatus: "Pending",
sessionRequestedTime: this.selectedDate + " " + this.time,
sessionStartTime: this.selectedDate + " " + this.time,
updatedTime: requestUpdatedTime
};
// console.log(postData);
if (this.location === "" || this.time === "") {
this.showAlert("Set Session Schedule Properly", "error", "top");
} else {
try {
const response = await this.savePendingSessionRequest({
pendingSessionRequest: postData
});
if (!response.data.success) {
this.showAlert(
"Failed!Check Your Internet Connection!",
"error",
"top"
);
} else {
this.confirmatinDialog = true;
this.showAlert("Successfully Sent Request!", "success", "top");
// setTimeout(function() { this.$router.push({ name: "sessionDashboard" })}, 3000);
}
// console.log("asdfasdfsdf" + response);
return response;
} catch (error) {
console.log(error);
}
}
},
confirmatinDialogClose() {
this.confirmatinDialog = false;
this.$router.push({ name: "sessionDashboard" });
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<style scoped>
.calenderTitleBar {
justify-content: center;
align-items: center;
display: flex;
background-color: #007790;
height: 40px;
width: 100%;
border-top-left-radius: 20px;
border-top-right-radius: 20px;
}
.my-date-picker {
font-size: 14px;
font-weight: 100;
}
.my-input >>> .v-input__slot {
border-radius: 20px !important;
min-height: min-content;
font-size: 14px;
min-width: min-content;
}
.my-input >>> .v-label {
font-size: 14px;
}
.my-input >>> .theme--light.v-select .v-select__selections {
color: aqua !important;
}
.my-date-picker >>> .v-date-picker-header,
.v-date-picker-table {
background-color: #eee;
}
.my-date-picker >>> .v-date-picker-table {
background-color: #eee;
position: relative;
padding: 0px;
font-size: 10px;
height: 200px;
line-height: 0.1px;
}
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
width: 100%;
}
</style>
<file_sep>/* eslint-disable no-console */
import SessionService from "../../../service/session.service";
export const actions = {
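  // fetch a paginated session list; type, mode and page_no are passed straight
  // through to the session service (presumably as query params on the list endpoint)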
async fetchSessionListById({ commit }, { user_id, type, mode, page_no }) {
try {
const response = await SessionService.fetchSessionListById(
user_id,
type,
mode,
page_no
);
commit("saveSessionListById", response);
return response;
} catch (error) {
console.log(error);
}
},
// get all info of a session
async fetchSessionDetailsById({ commit }, { user_id, session_id }) {
try {
const response = await SessionService.fetchSessionDetailsById(
user_id,
session_id
);
commit("saveSessionDetails", response);
return response;
} catch (error) {
console.log(error);
}
},
async fetchActiveSessionListById({ commit }, { user_id }) {
try {
const response = await SessionService.fetchActiveSessionListById(user_id);
commit("saveActiveSession", response);
return response;
} catch (error) {
console.log(error);
}
},
async fetchPendingSessionListById({ commit }, { user_id }) {
try {
const response = await SessionService.fetchPendingSessionListById(
user_id
);
commit("savePendingSession", response);
return response;
} catch (error) {
console.log(error);
}
},
async fetchCompleteSessionListById({ commit }, { user_id }) {
try {
const response = await SessionService.fetchCompleteSessionListById(
user_id
);
commit("saveCompleteSession", response);
return response;
} catch (error) {
console.log(error);
}
},
async fetchSessionDashboardSummeryById({ commit }, { user_id }) {
try {
const response = await SessionService.fetchSessionDashboardSummeryById(
user_id
);
commit("saveSessionDashboardSummery", response);
return response;
} catch (error) {
console.log(error);
}
},
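  // POST a new session request; the API is expected to return the created
  // document under response.data.data (hence the success checks below)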
async savePendingSessionRequest({ commit }, { pendingSessionRequest }) {
try {
const response = await SessionService.savePendingSessionRequest(
pendingSessionRequest
);
if (response.data.data) {
commit("savePendingSession");
return response;
}
} catch (error) {
console.log(error);
}
},
async saveActiveSessionRequest({ commit }, { activeSessionRequest }) {
try {
const response = await SessionService.saveActiveSessionRequest(
activeSessionRequest
);
// console.log(
// "i am saveactive response.data.data from action ",
// response.data.data
// );
if (response.data.data) {
commit("saveActiveSession");
return response;
}
} catch (error) {
console.log(error);
}
},
async saveCompleteSessionRequest(
{ commit },
{ completeSessionRequest, sessionId }
) {
try {
const response = await SessionService.saveCompleteSessionRequest(
completeSessionRequest,
sessionId
);
if (response.data.data) {
commit("saveCompleteSession");
return response;
}
} catch (error) {
console.log(error);
}
},
async saveAcceptRequestFromMentor(
{ commit },
{ mentorAcceptSessionRequest, sessionId }
) {
// console.log(
// "mentor Session Request from Action",
// mentorAcceptSessionRequest
// );
try {
const response = await SessionService.saveAcceptRequestFromMentor(
mentorAcceptSessionRequest,
sessionId
);
// console.log(
// "i am save saveAcceptRequestFromMentor response.data.data from action ",
// response.data.data
// );
if (response.data.data) {
commit("savePendingSession");
return response;
}
} catch (error) {
console.log(error);
}
},
async saveConfirmPaymentFromMentee(
{ commit },
{ confirmPaymentFromMentee, sessionId }
) {
try {
const response = await SessionService.saveConfirmPaymentFromMentee(
confirmPaymentFromMentee,
sessionId
);
// console.log(
// "i am save saveAcceptRequestFromMentor response.data.data from action ",
// response.data.data
// );
if (response.data.data) {
commit("saveActiveSession");
return response;
}
} catch (error) {
console.log(error);
}
}
};
<file_sep><template>
<div style="background-color:white">
<v-layout row wrap px-3 py-3>
<v-flex xs8>
<span
>{{ dateConversion(session.sessionStartTime) }}
{{ timeConversion(session.sessionStartTime) }}</span
>
<br />
<span>{{ session.sessionRequestedSpot }}</span>
<br />
<span v-for="service in session.serviceOffered" :key="service._id">
{{ service.name }},</span
>
<br />
</v-flex>
<v-flex xs4>
      <span wrap>
        ID:
        <strong>{{ session._id.slice(0, 8) }}</strong>
      </span>
      <span>
        Status:
        <strong>{{ session.sessionStatus }}</strong>
      </span>
</v-flex>
</v-layout>
</div>
</template>
<script>
export default {
props: ["session"],
methods: {
    dateConversion(value) {
      // getUTCMonth() is zero-based, so add 1 to get the calendar month
      const year = new Date(value).getUTCFullYear();
      const month = new Date(value).getUTCMonth() + 1;
      const date = new Date(value).getUTCDate();
      return year + "-" + month + "-" + date;
    },
timeConversion(value) {
const hours = new Date(value).getHours();
const minutes = new Date(value).getMinutes();
return hours + ":" + minutes;
}
}
};
</script>
<file_sep><template>
<div style=" text-align:center;padding-top:25px">
<div>
<span
@input="updateInput"
style="font-size:12px;line-height:1;margin-bottom: 4px;"
v-bind:contenteditable="isEditable"
>
<Strong
style="
color:#007790"
>{{ profileData.name }}</Strong
>
</span>
<br />
<span
@input="updateInput"
v-bind:contenteditable="isEditable"
class="textStyle"
>{{ profileData.designation }} at {{ profileData.company }}</span
>
<br />
<span v-bind:contenteditable="isEditable" class="textStyle mb-2">{{
profileData.address
}}</span>
<br />
<span v-bind:contenteditable="isEditable" class="textStyle">{{
profileData.email
}}</span>
<br />
<span v-bind:contenteditable="isEditable" class="textStyle">{{
profileData.number
}}</span>
<div>
<span class="textStyle">TopSkills:</span
><v-chip small
placeholder="topSkills"
v-for="(topskill, i) in profileData.topSkills"
:key="i"
class="textStyle"
>{{ topskill.name }}</v-chip
><br />
</div>
</div>
<!-- <v-btn small elevation-0 v-if="isEditable">save</v-btn> -->
</div>
</template>
<script>
export default {
props: ["profileData", "isEditable"],
data: () => ({
// isEditable: false,
name: ""
}),
methods: {
updateInput(e) {
// console.log(e.target.innerText);
// console.log(this.profileData.name)
}
},
watch: {
// name() {
// console.log(this.name);
// }
}
};
</script>
<style scoped>
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 4px;
}
</style>
<file_sep><template> </template>
<script>
export default {
name: "careerTestButtons",
props: ["data"],
data: () => ({
disabled: false
})
};
</script>
<file_sep><template>
<div style="background-color:#D9E4E6;padding:20px 20px 0px 20px">
<div class="topBorderRound" style="height:-webkit-fill-available;background-color:white">
<v-img
class="topBorderRound"
:aspect-ratio="3"
:src="require('@/assets/images/skillTestBackImage.jpg')"
>
<div style="text-align:right">
<v-btn @click="emitCloseEvent" style="text-align:right" icon>
<v-icon>clear</v-icon>
</v-btn>
</div>
</v-img>
<div class="cardCarrierDiv">
<v-card
class="elevation-1 questionCardDesign"
style="background-color:white; height: -webkit-fill-available;"
>
<div class="keepCenter">
<p>41/50</p>
<v-layout row wrap>
<v-flex>
<v-icon>alarm</v-icon>
</v-flex>
<v-flex>
              <p style="color:#90E933">00:09:00</p>
</v-flex>
</v-layout>
<p>
View your unanswered
<br />questions
</p>
<v-layout row wrap>
<v-flex v-for="que in queNo" :key="que.no" class="px-1 py-1">
<div
:class="{ answered: que.isAnswered}"
class="elevation-1 keepCenter makeADivToButton"
>
<span>{{que.no}}</span>
</div>
</v-flex>
</v-layout>
</div>
<v-layout row wrap class="mt-5 ml-2">
<v-flex xs1>
<div style="height:20px;width:20px;background-color:#90e933 "></div>
</v-flex>
<v-flex xs5>
          <span>05 unanswered</span>
</v-flex>
<v-flex xs1>
<div style="height:20px;width:20px;background-color:#DEF; "></div>
</v-flex>
<v-flex xs5>
          <span>04 answered</span>
</v-flex>
</v-layout>
</v-card>
</div>
</div>
</div>
</template>
<script>
export default {
data: () => ({
selectedOption: "",
showAlert: true,
// buttonStyle:{
// backgroundColor:red,
// },
queNo: [
{ no: 1, isAnswered: false },
{ no: 2, isAnswered: true },
{ no: 3, isAnswered: false },
{ no: 4, isAnswered: true },
{ no: 5, isAnswered: true },
{ no: 6, isAnswered: false },
{ no: 7, isAnswered: true },
{ no: 8, isAnswered: false }
]
}),
methods: {
emitCloseEvent() {
this.$emit("closeUnanswered");
}
}
};
</script>
<style scoped>
#answeredInfo,
#colorBox {
display: inline;
}
.answered {
background-color: #90e933;
}
.keepCenter {
padding: 10px;
font-size: 16px;
color: white;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
.makeADivToButton {
border-radius: 50px;
height: 40px;
width: 40px;
}
.cardCarrierDiv {
/* height: -webkit-fill-available; */
position: absolute;
top: 7%;
background-color: #d9e4e6;
border-radius: 10px;
}
.questionCardDesign {
padding: 10px;
background-color: #d9e4e6;
border-radius: 10px;
}
.topBorderRound {
position: relative;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
}
</style><file_sep><template>
<div style=" text-align:center ;background-color:white;padding:10px">
<v-btn-toggle v-model="toggle_one" mandatory>
<v-btn @click="requestedButton" flat color="#007790" class="px-5">
<span>Requested</span>
</v-btn>
<v-btn @click="modifyButton" flat color="#007790" class="px-5">
<span>Modify</span>
</v-btn>
</v-btn-toggle>
<v-list two-line class="px-4">
<!-- <v-subheader inset>Folders</v-subheader> -->
<template>
<v-list-tile avatar>
<!-- <v-divider></v-divider> -->
<v-list-tile-avatar :size="avatarSize">
<v-icon small :class="[items[0].iconClass]">{{
items[0].icon
}}</v-icon>
</v-list-tile-avatar>
<v-list-tile-content style="font-size:12px">
<v-list-tile-title v-if="!dateContentEditable">
<p>{{ requestedDate }}</p>
</v-list-tile-title>
<v-select
v-if="dateContentEditable"
v-model="requestedDate"
:items="scheduleDate"
:label="requestedDate"
single-line
hide-details
></v-select>
<!-- scheduleTime -->
</v-list-tile-content>
<v-list-tile-action v-if="modify">
<v-btn
@click="editRequestedTime"
small
flat
ripple
style="font-size:12px"
>Edit</v-btn
>
</v-list-tile-action>
</v-list-tile>
<v-divider></v-divider>
<v-list-tile avatar>
<v-list-tile-avatar :size="avatarSize">
<v-icon small :class="[items[1].iconClass]">{{
items[1].icon
}}</v-icon>
</v-list-tile-avatar>
<v-list-tile-content style="font-size:12px">
<v-list-tile-title v-model="date" v-if="!dateContentEditable">
<p>{{ requestedTime }}</p>
</v-list-tile-title>
</v-list-tile-content>
<v-select
v-if="dateContentEditable"
v-model="requestedTime"
:items="scheduleTime"
:label="requestedTime"
single-line
hide-details
></v-select>
<v-list-tile-action v-if="modify">
<v-btn
@click="editRequestedTime"
small
flat
ripple
style="font-size:12px"
>Edit</v-btn
>
</v-list-tile-action>
</v-list-tile>
<v-divider></v-divider>
<v-list-tile avatar>
<v-list-tile-avatar :size="avatarSize">
<v-icon small :class="[items[2].iconClass]">{{
items[2].icon
}}</v-icon>
</v-list-tile-avatar>
<v-list-tile-content style="font-size:12px">
<v-list-tile-title>
<p v-if="!modify">{{ session.sessionRequestedSpot }}</p>
</v-list-tile-title>
<v-select
v-if="modify"
v-model="session.sessionModifiedSpot"
:items="scheduleSpot"
:label="session.sessionRequestedSpot"
></v-select>
</v-list-tile-content>
<v-list-tile-action v-if="modify">
<v-btn small flat ripple style="font-size:12px">Edit</v-btn>
</v-list-tile-action>
</v-list-tile>
<v-divider></v-divider>
<v-list-tile avatar>
<v-list-tile-avatar :size="avatarSize">
<v-icon small :class="[items[3].iconClass]">{{
items[3].icon
}}</v-icon>
</v-list-tile-avatar>
<v-list-tile-content style="font-size:12px">
<v-list-tile-title
v-for="(service, i) in session.serviceOffered"
:key="i"
>{{ service.name }}</v-list-tile-title
>
</v-list-tile-content>
</v-list-tile>
<v-divider></v-divider>
<v-list-tile avatar>
<v-list-tile-avatar :size="avatarSize">
<v-icon small :class="[items[4].iconClass]">{{
items[4].icon
}}</v-icon>
</v-list-tile-avatar>
<v-list-tile-content style="font-size:12px">
<v-list-tile-title>2000$/Session</v-list-tile-title>
</v-list-tile-content>
</v-list-tile>
<v-divider></v-divider>
</template>
</v-list>
</div>
</template>
<script>
import { mapActions } from "vuex";
export default {
props: ["session"],
data: () => ({
schedules: [],
mentorAdditionalData: [],
scheduleTime: [],
requestedDate: "",
requestedTime: "",
date: "",
// requestedSpot: "",
scheduleSpot: [],
scheduleDate: [],
toggle_one: 0,
avatarSize: 6,
modify: false,
timeContentEditable: false,
dateContentEditable: false,
spotContentEditable: false,
items: [
{
icon: "folder",
iconClass: "grey lighten-1 grey--text"
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text"
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text"
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text"
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text"
}
]
}),
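  /*
    created(): loads the mentor's mentoring places and schedule,
    reduces each schedule entry to a "YYYY-M-D" string for the date
    <v-select>, and splits the mentee's requested ISO timestamp into
    the date and time strings shown above. The watchers below rebuild
    an ISO timestamp from these strings and emit it to the parent.
  */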
async created() {
try {
this.mentorAdditionalData = await this.fetchMentorDetailsDataById({
mentor_id: this.session.mentorId._id
});
      // console.log(this.mentorAdditionalData);
} catch (error) {
console.log(error);
}
this.scheduleSpot = this.mentorAdditionalData[0].mentoringPlaces;
// convert mentors schedule datesTime to only date
this.schedules = this.mentorAdditionalData[0].schedules;
    this.scheduleDate = this.mentorAdditionalData[0].schedules.map(function(
      item
    ) {
      const date = item.date;
      const getYear = new Date(date).getUTCFullYear();
      // getUTCMonth() is zero-based; add 1 so the string parses back into
      // the correct month when the watchers rebuild a Date from it
      const getMonth = new Date(date).getUTCMonth() + 1;
      const getDate = new Date(date).getUTCDate();
      return getYear + "-" + getMonth + "-" + getDate;
    });
    // console.log(this.scheduleDate);
    // convert the mentee's requested date & time
    const requestedTimeUTC = this.session.sessionRequestedTime;
    const year = new Date(requestedTimeUTC).getUTCFullYear();
    // keep the same one-based month convention as scheduleDate so that
    // indexOf() in the requestedDate watcher still matches
    const month = new Date(requestedTimeUTC).getUTCMonth() + 1;
    const date = new Date(requestedTimeUTC).getUTCDate();
    const hours = new Date(requestedTimeUTC).getHours();
    const minutes = new Date(requestedTimeUTC).getMinutes();
    this.requestedDate = year + "-" + month + "-" + date;
    this.requestedTime = hours + ":" + minutes;
},
watch: {
requestedDate() {
var dateIndex = this.scheduleDate.indexOf(this.requestedDate);
// console.log(dateIndex);
// this.timeIndex = dateIndex
this.scheduleTime = this.schedules[dateIndex].timeSlots;
// console.log(this.scheduleTime);
// console.log(this.requestedDate + " " + this.requestedTime)
var dateTime = new Date(
this.requestedDate + " " + this.requestedTime
).toISOString();
this.$emit("reviewedDate", dateTime);
},
requestedTime() {
var dateTime = new Date(
this.requestedDate + " " + this.requestedTime
).toISOString();
// console.log(dateTime);
this.$emit("reviewedTime", dateTime);
}
// spot() {
// this.$emit("reviewedSpot", this.requestedSpot);
// }
},
mounted() {},
computed: {},
methods: {
...mapActions("mentorStore", ["fetchMentorDetailsDataById"]),
requestedButton() {
this.modify = false;
this.timeContentEditable = false;
this.dateContentEditable = false;
this.spotContentEditable = false;
},
modifyButton() {
this.modify = true;
},
editRequestedTime() {
this.dateContentEditable = true;
}
}
};
</script>
<style scoped>
.v-list__tile__avatar {
min-width: 20px;
}
.v-btn-toggle .v-btn:first-child {
border-top-left-radius: 15px;
border-bottom-left-radius: 15px;
}
.v-btn-toggle .v-btn:last-child {
border-top-right-radius: 15px;
border-bottom-right-radius: 15px;
}
.v-btn-toggle {
border-top-left-radius: 15px;
border-bottom-left-radius: 15px;
border-top-right-radius: 15px;
border-bottom-right-radius: 15px;
}
</style>
<file_sep><template>
<div>
<v-layout row wrap style="text-align:center;color:white;padding-top:100px">
<v-flex xs12>
<h3>Sign Up to get Started</h3>
</v-flex>
<v-flex xs12 px-5 pt-3>
<register :getUsername="username" :getType="type" v-if="showRegistration"></register>
<v-layout row wrap v-if="!showRegistration">
<v-flex xs12>
<v-btn
v-if="!emailButton && mobileButton"
@click="
(emailButton = !emailButton), (mobileButton = !mobileButton)
"
>
<v-avatar px-2 size="25" color="#007799">
<v-icon small color="white">phone</v-icon>
</v-avatar>
<span class="px-2">Mobile</span>
</v-btn>
<v-container v-if="emailButton && !mobileButton">
<v-layout row wrap>
<v-flex xs12 md6 offset-md3>
<v-text-field
required
@input="$v.mobileNumber.$touch()"
@blur="$v.mobileNumber.$touch()"
:error-messages="numberErrors"
:counter="11"
type="number"
v-model="mobileNumber"
dark
color="white"
placeholder="Enter Phone"
prefix="+880"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12 text-xs-center>
<v-btn dark outline small @click="goToRegistrationWithTypeMobile">Next</v-btn>
</v-flex>
</v-layout>
</v-container>
</v-flex>
<v-flex xs12>OR</v-flex>
<v-flex>
<v-btn
v-if="emailButton && !mobileButton"
medium
@click="
(emailButton = !emailButton), (mobileButton = !mobileButton)
"
>
<v-avatar px-2 size="25" color="#007799">
<v-icon small color="white">email</v-icon>
</v-avatar>
<span class="px-2">email</span>
</v-btn>
<v-container v-if="!emailButton && mobileButton">
<v-layout row wrap>
<v-flex xs12 md6 offset-md3>
<v-text-field
:error-messages="emailErrors"
required
@input="$v.emailAddress.$touch()"
@blur="$v.emailAddress.$touch()"
type="email"
dark
color="white"
v-model="emailAddress"
placeholder="Enter Email"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12 text-xs-center>
<v-btn dark outline small @click="goToRegistrationWithTypeEmail">Next</v-btn>
</v-flex>
</v-layout>
</v-container>
</v-flex>
</v-layout>
</v-flex>
<v-flex xs12>
<v-img
class="roadPosition"
:src="require('@/assets/authAbsolutePath.png')"
:max-width="250"
:max-height="250"
></v-img>
</v-flex>
</v-layout>
</div>
</template>
<script>
import { validationMixin } from "vuelidate";
import register from "@/views/modalView/registration";
import {
required,
maxLength,
email,
minLength
} from "vuelidate/lib/validators";
export default {
components: {
register
},
// Mixins are a flexible way to distribute reusable functionalities for Vue components
/* here validationMixin has method,computed property
like functionalities which will merge with your custom functionalities
*/
mixins: [validationMixin],
// difine value of vuelidate variables
validations() {
if (!this.mobileButton) {
return {
mobileNumber: {
required,
maxLength: maxLength(11),
minLength: minLength(10)
}
};
} else if (!this.emailButton) {
return {
emailAddress: { required, email }
};
}
},
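  /*
    validations() is re-evaluated reactively: only the field for the
    currently visible input (mobile or email) is validated, so the
    hidden field can never block the Next button.
  */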
data: () => ({
showRegistration: false,
mobileNumber: "",
emailAddress: "",
mobileButton: false,
emailButton: true,
disableAnotherTextFieldValue: false,
type: "",
username: ""
}),
watch: {},
computed: {
numberErrors() {
const errors = [];
if (!this.$v.mobileNumber.$dirty) return errors;
      !this.$v.mobileNumber.maxLength &&
        errors.push("Mobile Number must be at most 11 characters long");
!this.$v.mobileNumber.minLength &&
errors.push("Mobile Number must be at least 10 characters long");
!this.$v.mobileNumber.required &&
errors.push("Mobile Number is required.");
return errors;
},
emailErrors() {
const errors = [];
if (!this.$v.emailAddress.$dirty) return errors;
!this.$v.emailAddress.email && errors.push("Must be a valid e-mail");
!this.$v.emailAddress.required && errors.push("E-mail is required");
return errors;
}
},
methods: {
gotoLogin() {
const showLogin = true;
this.$emit("showLogin", showLogin);
},
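    /*
      Both "Next" handlers below validate only the visible field (see
      validations() above) and then switch to the registration form,
      passing the entered value and its type ("number" or "email") as
      props.
    */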
goToRegistrationWithTypeMobile() {
this.$v.$touch();
if (!this.$v.$invalid) {
this.showRegistration = true;
this.username = this.mobileNumber;
this.type = "number";
}
},
goToRegistrationWithTypeEmail() {
this.$v.$touch();
if (!this.$v.$invalid) {
this.showRegistration = true;
this.username = this.emailAddress;
this.type = "email";
}
}
}
};
</script>
<style scoped>
.input-group--focused > .primary--text {
caret-color: white !important;
color: white !important;
}
.theme--light.v-btn:not(.v-btn--icon):not(.v-btn--flat) {
border-radius: 25px;
}
.curvyDiv {
/* display: absolute; */
flex: 1;
height: 100%;
/* background-color: # */
/* width:fit-content; */
/* background-image:url('../../../assets/authBackground.png') */
}
</style>
<file_sep><template>
<div>
<v-img
:src="getImgUrl(mentorBasic.imageUrl)"
position="right center"
class="absoluteImageStyle"
></v-img>
<div style="padding-top:65px;text-align:center">
<p style="font-size:12px;line-height:1;margin-bottom: 4px;">
<Strong style="
color:#007790">{{ mentorBasic.name }}</Strong>
</p>
<!-- {{mentorBasic}} -->
<p class="textStyle">{{ mentorBasic.designation }}</p>
<span class="textStyle" v-for="(place,i) in mentorBasic.mentoringPlaces" :key="i">{{ place }}</span>
<br />
<span>Skilled At :</span>
<v-chip
small
v-for="skill in mentorBasic.skills"
:key="skill._id"
class="textStyle"
style="margin-top:5px;padding-bottom:10px"
>{{ skill.name }}</v-chip>
<br />
<!-- {{skill.name}} -->
<v-layout row wrap v-if="!sheet">
<br />
<v-flex xs1></v-flex>
<v-flex xs4>
<span class="textStyle" v-for="(data, i) in mentorAdditionalData" :key="i">
Completed Sessions
<br />
{{ data.mentoringCounts }}
</span>
<span class="textStyle"></span>
</v-flex>
<!-- vertical divider number 1 -->
<!-- <v-flex xs1 py-2 pl-2>
<v-divider light vertical></v-divider>
</v-flex>
<v-flex xs4 px-1>
<span class="textStyle">
Next Available
<br />Time slot <br />Sunday 9.00 PM
</span>
</v-flex>-->
<!-- vertical divider number 2 -->
<v-flex xs1 py-2>
<v-divider light vertical></v-divider>
</v-flex>
<v-flex xs4>
<span class="textStyle">
Average Rating
<br />
{{ mentorBasic.mentorRating }}
</span>
<!-- <span class="caption"></span> -->
</v-flex>
</v-layout>
</div>
</div>
</template>
<script>
export default {
props: ["mentorBasic", "sheet", "mentorAdditionalData"],
methods: {
getImgUrl(img) {
try {
// console.log("image: ", img);
if (!img || img === undefined) {
return "";
} else {
return process.env.VUE_APP_ROOT_API + "static/" + img;
}
} catch (error) {
console.log(error);
}
}
}
};
</script>
<style>
.absoluteImageStyle {
  position: absolute;
left: 50%;
top: 2%;
margin-left: -55px;
min-width: 110px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
min-height: 100px;
}
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 2px;
}
</style><file_sep><template>
<div>
<v-card style="margin:10px;padding:10px">
<v-layout row wrap>
<v-flex xs3 class="keepCenter">
<v-avatar size="50">
<v-img :src="imageUrl(authorizer.imageUrl)"></v-img>
</v-avatar>
</v-flex>
<v-flex xs5 class="subText">
<span>
<Strong>{{authorizer.name}}</Strong>
</span>
<br />
<span>{{authorizer.count}} Authorized tests</span>
<br />
<v-layout row wrap>
<v-flex xs1>{{authorizer.rating}}</v-flex>
<v-flex style="padding-left:2px">
<v-rating dense size="15" v-model="rating"></v-rating>
</v-flex>
</v-layout>
</v-flex>
<v-flex xs4 class="keepCenter">
<v-icon>arrow_right_alt</v-icon>
</v-flex>
</v-layout>
</v-card>
</div>
</template>
<script>
export default {
props: ["authorizer"],
data: () => ({
rating: 5
}),
computed: {},
methods: {
imageUrl(image) {
if (!image || image === undefined) {
return "https://placehold.it/550x300&text=Authorizer Image";
}
return process.env.VUE_APP_ROOT_API + "/static/" + image;
}
}
};
</script>
<style >
@import "../../../assets/styles/home.css";
.keepCenter {
display: flex;
justify-content: center;
align-items: center;
}
.centerOfColumn {
padding: 10px;
}
</style><file_sep><template>
<!-- home.mentor.slider.vue used in home.vue view -->
<v-layout row wrap>
<v-card class="myContainer" flat>
<!-- left to right slide in mobile view -->
<v-flex class="myColumn" v-for="mentor in mentors" :key="mentor._id">
<mentorDashboardCard :mentorBasic="mentor" />
</v-flex>
</v-card>
</v-layout>
</template>
<script>
import mentorDashboardCard from "@/components/global/global.mentor.dashboard.card";
export default {
props: [
"mentors" //mentors data
],
components: {
mentorDashboardCard
}
};
</script>
<style>
@import "../../../../assets/styles/home.css";
@media screen and (min-width: 1055px) {
.myContainer {
display: -webkit-flex;
flex-direction: column;
-webkit-flex-direction: column;
overflow-y: scroll;
}
}
.myColumn {
margin: 20px;
-webkit-flex: 1 1 auto;
max-width: 400px;
}
@media screen and (max-width: 1055px) {
.myContainer {
white-space: nowrap;
display: -webkit-flex;
flex-direction: row;
-webkit-flex-direction: row;
overflow-y: scroll;
}
::-webkit-scrollbar {
display: none;
}
}
</style>
<file_sep>export const state = {
skillTests: [],
popularTests: [],
suggestedTests: [],
test: {
id: "",
title: "",
description: "",
rating: 0,
publisher: "",
authorizedBy: "",
price: 0,
image: "",
metaTagId: [],
tag: ""
},
testDetails: {
id: "",
testTaken: 0,
approxTimeNeeded: 0,
benefits: [],
achievements: [],
others: "",
timeLimit: 0,
isPrivate: false,
isFree: true
},
questions: [],
result: { test: "", user: "", score: 0, timeElapsed: 0, testTakenTime: null },
authorizers: []
};
<file_sep><template>
<div class="px-2 py-2" style="background-color:white">
<h4 style="color:#007799">Reference</h4>
<v-list three-line>
<template v-for="(item, index) in profileData.recommendations">
<v-list-tile :key="index" avatar>
<v-list-tile-avatar>
<img :src="item.image" />
</v-list-tile-avatar>
<v-list-tile-content>
<v-list-tile-sub-title
v-html="item.comment"
></v-list-tile-sub-title>
<v-list-tile-sub-title style="color:#007799" class="px-3"
>{{ item.name }},{{ item.designation }} at
{{ item.company }}</v-list-tile-sub-title
>
</v-list-tile-content>
</v-list-tile>
</template>
</v-list>
</div>
</template>
<script>
export default {
props: ["profileData", "isEditable"]
};
</script>
<file_sep><template>
<!-- this modal is for mentee when
he find mentor on connect page
and click on mentor card to get
detail about mentor-->
<div style="background-color:#eee" class="ifScreenSizeDesktop">
<!-- mobile layout -->
<div style="background-color:white;" class="mb-1">
<v-layout row wrap class="modalHeader">
<v-btn icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-layout>
<mentorBasicInfo
:mentorBasic="mentorBasic"
:mentorAdditionalData="mentorAdditionalData"
:sheet="sheet"
/>
</div>
<div class="mb-1">
<serviceOffered :mentorBasic="mentorBasic"
:mentorAdditionalData="mentorAdditionalData"/>
</div>
<div class="mb-1">
<mentorSummary :mentorBasic="mentorBasic" />
</div>
<div class="mb-5">
<review class="mb-2" :mentorBasic="mentorBasic" />
</div>
</div>
</template>
<script>
import mentorBasicInfo from "@/components/local/mentorProfileModal/mentorProfileModal.basic";
import serviceOffered from "@/components/local/mentorProfileModal/mentorProfileModal.services";
import mentorSummary from "@/components/local/mentorProfileModal/mentorProfileModal.summary";
import review from "@/components/local/mentorProfileModal/mentorProfileModal.review";
import { mapActions, mapGetters } from "vuex";
export default {
components: {
serviceOffered,
mentorSummary,
review,
mentorBasicInfo
},
// here sheet data is as like as a dialog value(boolean) nothing else
props: ["mentorBasic", "sheet"],
data: () => ({
mentorAdditionalData: []
}),
async created() {
// TODO: mentorId data will get from props mentor
// var mentorId = 1;
//sending request to fetchMentorSessionCountById(mentor_id) action
this.mentorAdditionalData = await this.fetchMentorDetailsDataById({
mentor_id: this.mentorBasic._id
});
// console.log('mentor profile modal',this.mentorBasic._id );
//sending request to fetchMentorSessionCountById(mentor_id) action
await this.fetchMentorSessionCountById({ mentor_id: this.mentorBasic._id });
//sending request to fetchAllReviewsFromMenteesAgainstMentor(mentor_id) action
// await this.fetchAllReviewsFromMenteesAgainstMentor({
// mentor_id: this.mentorBasic._id
// });
},
computed: {
// get values to direct use frontend
...mapGetters("mentorStore", [
"getMentorDetailsData",
"getMentorSessionCount",
"getAllReviewsFromMenteesAgainstMentor"
])
},
methods: {
//map which actions will need
...mapActions("mentorStore", [
"fetchMentorDetailsDataById",
"fetchMentorSessionCountById",
// "fetchAllReviewsFromMenteesAgainstMentor"
]),
/*
just sending a changed value to close dialog
great job using emit problem solve
no need to get parents dialog value as a prop
*/
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
}
}
};
</script>
<style>
.modalHeader {
height: 80px;
background-color: #eee;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep><template>
<div class=" px-2 py-1" style="text-align:center;background-color:white">
    <span v-if="!largText">{{ profileData.bio.substring(0, 120) }}</span>
    <span v-else>{{ profileData.bio }}</span>
<v-btn small flat color="#007799" @click="showMore">{{ buttonText }}</v-btn>
</div>
</template>
<script>
export default {
props: ["profileData"],
data: () => ({
largText: false,
    buttonText: "See more",
isEditable: false
}),
async created() {},
mounted() {},
computed: {},
methods: {
    showMore() {
      this.largText = !this.largText;
      this.buttonText = this.largText ? "See less" : "See more";
    }
}
};
</script>
<file_sep>/* eslint-disable no-console */
// common services for all users(mentor, mentee both)
import ApiService from "./api.service";
const CareerTestService = {
async fetchAllQuestion() {
try {
const response = await ApiService.get("/personality-test-questions");
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
  async savePersonalityTestResult(test_result, user_id) {
    try {
      // saving a result via GET would drop the payload (axios-style
      // get() ignores a data argument); assuming ApiService exposes
      // the usual post(url, data) helper
      const response = await ApiService.post(
        "/personality-test-results/" + user_id,
        test_result
      );
      // console.log(response);
      return response.data.data;
} catch (error) {
// console.log(error.response);
}
}
};
export default CareerTestService;
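// Usage sketch (hypothetical Vuex action — the store wiring below is an
// assumption, not part of this service):
//
//   async fetchQuestions({ commit }) {
//     const questions = await CareerTestService.fetchAllQuestion();
//     if (questions) commit("setQuestions", questions);
//   }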
<file_sep><template>
<div class="pt-2" style="background-color:white">
<!-- {{reviewRating}} -->
<div v-for="(review,i) in reviewRating" :key="i">
<v-layout>
<v-flex xs3 style="padding:10px;justify-content: center;
display: flex;">
<v-avatar size="44" style>
<v-img :src="require('@/assets/user.png')"></v-img>
</v-avatar>
</v-flex>
<v-flex xs4>
<!-- <span>{{review.date}}</span> -->
<br>
<span>{{review.userId.name}}</span>
<br>
<v-layout>
<v-flex xs2>
<span>{{review.averageRatingFromUser}}</span>
</v-flex>
<v-flex xs>
<v-rating
readonly
v-model="review.averageRatingFromUser"
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
</v-layout>
</v-flex>
<v-flex xs5>
<span>Experience:</span>
{{review.sessionRatingFromUser.experience}}
<span>Communication:</span>
{{review.sessionRatingFromUser.communication}}
<span>Friendliness:</span>
{{review.sessionRatingFromUser.friendliness}}
</v-flex>
</v-layout>
<v-layout row wrap class="pt-1">
<v-flex xs3></v-flex>
<v-flex xs6>
<span>{{review.sessionReviewFromUser}}</span>
</v-flex>
<v-flex xs2></v-flex>
<v-divider inset class="mr-5 my-3"></v-divider>
</v-layout>
</div>
</div>
</template>
<script>
export default {
props: ["reviewRating"],
data: () => ({
experience: 4
})
};
</script><file_sep><template>
<div>
<!-- mentee image -->
<v-img
:src="require('@/assets/rahat.jpg')"
position="right center"
contain
class="absoluteImageStyle menteeImagePositioning"
></v-img>
<!-- mentor image -->
<v-img
:src="require('@/assets/demoImageMF.png')"
position="right center"
class="absoluteImageStyle mentorImagePositioning"
></v-img>
<v-layout row wrap>
<v-flex xs2>
<!-- spacing -->
</v-flex>
<!-- mentee basic information -->
<v-flex xs3 style="padding-top:75px;padding-bottom:4px;text-align:center">
<div>
<p
style="font-size:12px;line-height:1;margin-bottom: 4px;margin-top:5px"
>
<Strong
style="
color:#007790"
>{{ basicInfo.userId.name }}</Strong
>
</p>
<p class="textStyle">{{ basicInfo.userId.designation }}</p>
<p class="textStyle">{{ basicInfo.userId.address }}</p>
<v-btn large flat color="#007790" dark style="font-size:12px;"
>view</v-btn
>
</div>
</v-flex>
<v-flex xs2>
<!-- spacing -->
</v-flex>
<!-- mentor basic information -->
<v-flex
xs3
      style="padding-top:75px;padding-bottom:4px;text-align:center"
>
<div>
<p
style="font-size:12px;line-height:1;margin-bottom: 4px;margin-top:5px"
>
<Strong
style="
color:#007790"
>{{ basicInfo.mentorId.name }}</Strong
>
</p>
<p class="textStyle">{{ basicInfo.mentorId.designation }}</p>
<p class="textStyle">{{ basicInfo.mentorId.address }}</p>
<v-btn flat small color="#007790" dark style="font-size:12px;"
>view</v-btn
>
</div>
</v-flex>
<v-flex xs2></v-flex>
</v-layout>
</div>
</template>
<script>
export default {
props: ["basicInfo"]
};
</script>
<style scoped>
.mentorImagePositioning {
left: 70%;
}
.menteeImagePositioning {
left: 30%;
}
.absoluteImageStyle {
min-width: 110px;
top: 4%;
margin-left: -55px;
height: 120px;
  position: absolute;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
}
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 2px;
}
</style>
<file_sep><template>
<div style="background-color:#eee">
<!-- mobile layout -->
<div style="background-color:white;" class="mb-1">
<v-layout row wrap class="modalHeader">
<v-btn icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-layout>
<basic :basicInfo="session"/>
</div>
<div class="mb-1">
<info :session="session"/>
</div>
<div class="mb-1">
<conversation v-if="noConversation"/>
</div>
<div v-if="mode==='Mentor'" style="background-color:white;text-align:center;padding-top:10px">
<v-btn style="border-radius:25px;color:#007799" large class="elevation-0" @click="sessionComplete">Complete</v-btn>
</div>
    <v-snackbar
      v-model="snackbar"
      :color="color"
      :timeout="timeout"
      :top="y === 'top'"
      :bottom="y === 'bottom'"
    >
      {{ snackbartext }}
      <v-btn dark flat @click="snackbar = false">Close</v-btn>
    </v-snackbar>
</div>
</template>
<script>
// global
import basic from "@/components/global/global.MentorMenteeBasicInfo.vue";
//local
import info from "@/components/global/mobile/global.sessionDetails.info.vue";
import conversation from "@/components/local/activeSessionDetailsModal/activeSessionDetailsModal.conversation.vue";
import noConversation from "@/components/local/activeSessionDetailsModal/activeSessionDetailsModal.noConversation.vue";
// api
import { mapActions, mapGetters, mapState } from "vuex";
export default {
props: ["session","mode"],
data: () => ({
noConversation: true,
reviewedTime: "",
sessionModifiedStartTime: "",
snackbar: false,
color: "error",
timeout: 4000,
snackbartext: "",
y: "",
sessions: [],
hourlyRate: ""
}),
components: {
basic,
info,
conversation,
noConversation
},
async created() {
// try {
// const response = await this.fetchMentorBasicDataById({
// mentor_id: this.session.mentorId._id
// });
// this.hourlyRate = response[0].hourlyRate;
// this.services = response[0].services;
// } catch (error) {
// console.log(error);
// }
// console.log(loggedIn)
// console.log('hello',response[0].services);
},
  computed: {},
methods: {
//map which actions will need
...mapActions("mentorStore", ["fetchMentorBasicDataById"]),
...mapActions("sessionStore", ["saveCompleteSessionRequest"]),
async sessionComplete() {
//getting recent time
const endTime = new Date().toISOString();
// data array
let updateData = {
sessionStatus: "Complete",
sessionEndTime: endTime,
updatedTime: endTime
};
// passing updateData through action
try {
const response = await this.saveCompleteSessionRequest({
completeSessionRequest: updateData,
sessionId: this.session._id
});
// console.log(response);
if (!response.data.success) {
this.showAlert(
"Failed!Check Your Internet Connection!",
"error",
"top"
);
} else {
this.showAlert(
"Congratulation.You Complete your session successfully!",
"success",
"top"
);
this.$router.push(this.$route.query.redirect || "/sessionDashboard");
}
return response;
} catch (error) {
console.log(error);
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
},
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
}
}
};
</script>
<file_sep><template>
<div style="background-color:white;height: -webkit-fill-available;">
<v-img
contain
:src="require('@/assets/images/careerKiPoster.jpg')"
:aspect-ratio="5"
class="mt-5"
></v-img>
<v-icon class="keepCenter" size="50" color="#007799">warning</v-icon>
<h1 class="keepCenter">
Open this site with your Mobile Phone/Tab.
      <br />Desktop Web View is coming soon
</h1>
</div>
</template>
<style>
.keepCenter {
font-size: 25px;
text-align: center;
color: #007799;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
</style>
<file_sep><template>
<div style="background-color:#eee">
<!-- mobile layout -->
<div style="background-color:white;" class="mb-1">
<v-layout row wrap class="modalHeader">
<v-btn icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-layout>
<basic :menteeDetailInfo="menteeDetailInfo" />
</div>
<div class="mb-1">
<bio :menteeDetailInfo="menteeDetailInfo" />
</div>
<div class="mb-1">
<skills :menteeDetailInfo="menteeDetailInfo" />
</div>
<div class="mb-5">
<requestDetails
:session="session"
@reviewedDate="reviewedDate"
@reviewedTime="reviewedTime"
/>
</div>
</div>
</template>
<script>
import basic from "@/components/local/acceptFromMentorModal/acceptFromMentorModal.basic";
import bio from "@/components/local/acceptFromMentorModal/acceptFromMentorModal.bio";
import skills from "@/components/local/acceptFromMentorModal/acceptFromMentorModal.skills";
import requestDetails from "@/components/local/acceptFromMentorModal/acceptFromMentorModal.reqestDetails";
import { mapActions, mapGetters } from "vuex";
export default {
props: ["session"],
data: () => ({
menteeDetailInfo: []
}),
components: {
basic,
bio,
skills,
requestDetails
},
//passing userid and session id and call fetchSessionDetailsById(user_id,session_id) action
async created() {
// var userId = '5cbc5e1fd7422d09ec7ec20e';
// var sessionId = 1;
//geting mentor basic data
const response = await this.fetchUserDetailInfoById({
user_id: this.session.userId._id
});
this.menteeDetailInfo = response;
// console.log("dsfsflflfd", this.menteeDetailInfo);
},
computed: {
// TODO: should check is getSessionDetails bring data
//you will get all session information in getSessionDetails when you use it
...mapGetters("commonUserStore", ["getUserDetailInfoById"])
},
methods: {
//mapping actions
...mapActions("commonUserStore", ["fetchUserDetailInfoById"]),
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
},
    reviewedDate(el) {
      // console.log("reviewedDate", el);
      if (this.session.sessionRequestedTime != el) {
        this.session.sessionModifiedStartTime = el;
        this.session.sessionStartTime = el;
      } else {
        this.session.sessionModifiedStartTime = null;
        this.session.sessionStartTime = this.session.sessionRequestedTime;
      }
    },
    reviewedTime(el) {
      // console.log("reviewedTime", el);
      if (this.session.sessionRequestedTime != el) {
        this.session.sessionModifiedStartTime = el;
        this.session.sessionStartTime = el;
      } else {
        this.session.sessionModifiedStartTime = null;
        this.session.sessionStartTime = this.session.sessionRequestedTime;
      }
    }
}
};
</script>
<style scoped>
.modalHeader {
height: 80px;
background-color: #eee;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep><template>
<v-card class="elevation-3">
<v-toolbar dark color="#007790">
<v-toolbar-title>Registration form</v-toolbar-title>
<v-spacer></v-spacer>
</v-toolbar>
<v-card-text>
<v-form>
<v-card-text>
<strong>Give us some information</strong>
</v-card-text>
<v-layout row wrap>
<v-flex class="pr-5 pl-5" xs12>
<v-text-field
prepend-icon="person"
id="firstname"
name="firstname"
label="Firstname"
type="text"
v-model="firstname"
></v-text-field>
<br />
<v-text-field
prepend-icon="person"
id="lastname"
name="lastname"
label="Lastname"
type="text"
v-model="lastname"
></v-text-field>
<br />
          <v-text-field
            prepend-icon="mobile_screen_share"
            id="mobile"
            name="mobile"
            label="Mobile Number"
            type="tel"
            v-model="mobile"
          ></v-text-field>
          <br />
          <v-text-field
            prepend-icon="lock"
            id="password"
            name="password"
            label="Password"
            type="password"
            v-model="password"
          ></v-text-field>
<br />
</v-flex>
</v-layout>
</v-form>
</v-card-text>
<v-card-actions>
<router-link to="/login">
<a>Back to login</a>
</router-link>
<v-spacer></v-spacer>
<v-btn color="primary" @click="handleSubmit()">Register</v-btn>
</v-card-actions>
</v-card>
</template>
<script>
export default {
  data: () => ({
    email: "",
    firstname: "",
    lastname: "",
    mobile: "",
    password: ""
  }),
  methods: {
    // minimal stub so the Register button does not throw; the actual
    // registration call (store action / service) is not part of this
    // file and is left as an assumption
    handleSubmit() {}
  }
};
</script>
<file_sep>/* eslint-disable no-console */
import { UserInfoService } from "../../../service/storage.service";
// import { UserProfileService } from "../../../services/profile.service";
export const actions = {
// async getUserInfo ({ commit }, {userID}) {
// try {
// const response = await UserProfileService.getUserProfileInfo(userID);
// // console.log(response.data.data[0]);
// commit("getUserInfo", response.data.data[0]);
// } catch (error) {
// console.log(error);
// }
// },
async setUserID({ commit }, { userID }) {
try {
const response = await UserInfoService.saveUserID(userID);
// console.log(response);
commit("setUserID", response);
} catch (error) {
console.log(error);
}
},
async setLoading({ commit }, { loadingState }) {
try {
// console.log(loadingState);
commit("setLoading", loadingState);
} catch (error) {
console.log(error);
}
},
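  /*
    setLoading / decrementLoading appear to drive a counter-style global
    loading flag (increment per pending request, decrement on
    completion); the exact behaviour lives in the mutations, which are
    not part of this file.
  */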
async decrementLoading({ commit }, { loadingState }) {
try {
commit("decrementLoading", loadingState);
} catch (error) {
console.log(error);
}
}
};
<file_sep><template>
<!-- rating and review msg complete
and user clicked on submit and
preview this component will show-->
<div style="padding:10px">
<v-layout row wrap style="margin:10px 20px 20px 20px;border:1px solid grey">
<v-flex
style=" text-align: center;
display: flex;
flex-direction: column;
justify-content: center;"
xs5
>
<h1>{{average.toFixed(1)}}</h1>
<p>Over All Score</p>
</v-flex>
<v-flex xs1>
<v-divider inset vertical></v-divider>
</v-flex>
<v-flex xs6 style="margin:20px 0px 20px 0px">
<span class="spanFontSize">
<Strong>Experience</Strong>
</span>
<br>
<v-layout row wrap>
<v-flex xs6>
<v-rating
readonly
v-model="visibleCards[0].rating"
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
<v-flex xs6>(4)</v-flex>
</v-layout>
<span class="spanFontSize">
<Strong>Communication</Strong>
</span>
<br>
<v-layout row wrap>
<v-flex xs6>
<v-rating
readonly
v-model="visibleCards[1].rating"
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
<v-flex xs6>(4)</v-flex>
</v-layout>
<span class="spanFontSize">
          <Strong>Friendliness</Strong>
</span>
<br>
<v-layout row wrap>
<v-flex xs6>
<v-rating
readonly
v-model="visibleCards[2].rating"
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
<v-flex xs6>(3)</v-flex>
</v-layout>
</v-flex>
</v-layout>
<div style="padding:10px">
<p>
<Strong>Review</Strong>
</p>
<p>{{review}}</p>
</div>
</div>
</template>
<script>
export default {
  props: ["visibleCards", "review", "session"],
  data: () => ({
    average: 0
    // experience: 4.3,
    // communication: 4,
    // friendliness: 3
  }),
  created() {
    var total =
      this.visibleCards[0].rating +
      this.visibleCards[1].rating +
      this.visibleCards[2].rating;
    this.average = total / 3;
    this.$emit("averageRating", this.average);
  }
};
</script><file_sep>export const state = {
questions: [],
testResult: []
};
<file_sep><template>
<div>
<div style="text-align:center">
<h3 flat>View Invoice</h3>
<br />
<span>Transaction done on 09 April 2019</span>
<br />
<p>Your reference number is 0349549</p>
<v-btn class="elevation-0" to="/sessionDashboard" large solo block>Done</v-btn>
</div>
</div>
</template>
<style scoped>
.theme--light.v-btn:not(.v-btn--icon):not(.v-btn--flat) {
border-radius: 25px;
}
</style>
<file_sep>export const getters = {
getIsAuthenticate: state => {
// console.log(state.isAuthenticate);
return state.isAuthenticate;
},
authenticationErrorCode: state => {
return state.authenticationErrorCode;
},
authenticationError: state => {
return state.authenticationError;
},
authenticating: state => {
return state.authenticating;
}
};
<file_sep><template>
<!-- this component used only on connect.vue view -->
<v-card flat class="mb-1" width="100%">
<v-layout row wrap pt-3>
<v-flex xs5 pl-3>
<!-- card image layout -->
<v-layout row wrap>
<v-flex xs8>
<v-img
style="border-top-left-radius:15px;border-top-right-radius:15px"
:src="getImgUrl(mentor.imageUrl)"
aspect-ratio="1.3"
position="center center"
></v-img>
</v-flex>
<v-flex xs4></v-flex>
</v-layout>
<div>
<p class="mb-1 mt-1 mr-1" style="color:#007790">
<strong>{{ mentor.name }}</strong>
</p>
<span>{{ mentor.designation }} at {{ mentor.company }}</span>
</div>
</v-flex>
<v-flex xs7>
<!-- mentor basic information list -->
<v-layout row wrap>
<v-flex xs12>
<!-- <v-icon small>location_on</v-icon> -->
<v-avatar :size="16" color="grey lighten-4">
<img :src="require('@/assets/icons/connect/location.png')" alt="avatar" />
</v-avatar>
<!-- {{mentor}} -->
<span>{{ place }}</span>
<br />
<v-avatar :size="16" color="grey lighten-4">
<img :src="require('@/assets/icons/connect/skill.png')" alt="avatar" />
</v-avatar>
<span
style=" word-break: break-all;"
v-for="skill in mentor.skills.slice(0, 3)"
:key="skill._id"
>{{ skill.name }},</span>
<br />
<v-avatar :size="16" color="grey lighten-4">
<img :src="require('@/assets/icons/connect/service.png')" alt="avatar" />
</v-avatar>
<span v-for="service in mentor.services" :key="service._id">{{ service.name }},</span>
<br />
<v-avatar :size="16" color="grey lighten-4">
<img :src="require('@/assets/icons/connect/rate.png')" alt="avatar" />
</v-avatar>
<span>Rating: {{ mentor.rating }}</span>
<br />
<v-avatar :size="16" color="grey lighten-4">
<img :src="require('@/assets/icons/connect/price.png')" alt="avatar" />
</v-avatar>
<span style="font-weight: bold">
<span style="text-decoration:line-through">{{ mentor.hourlyRate }} Tk/Sessions</span> FREE
</span>
<v-card-actions>
<v-spacer></v-spacer>
<!--TODO: pass mentorId after click-->
<!-- onClick open mentorProfileModal -->
<v-btn
id="mentor-card-view-btn"
small
flat
color="#007790"
@click.stop="dialog = true"
>View</v-btn>
<!-- mentorProfileModal dialog -->
<v-dialog
v-if="dialog"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
class="ifScreenSizeDesktop"
>
<v-btn
id="mentor-card-view-connect-btn"
@click="checkIsLoggedIn"
bottom
fab
medium
block
:icon="true"
fixed
dark
color="#007790"
>Book a Session</v-btn>
<!-- bottom sheet will open with a date picker -->
<v-bottom-sheet v-model="sheet">
<!-- bottom sheet activator template with activator-->
<!-- <template v-slot:activator>
</template>-->
<!-- date picker component-->
<mentorBookScheduling :mentor="mentor" />
</v-bottom-sheet>
<!-- mentor profile modal component-->
<mentorProfileModal
:mentorBasic="mentor"
:sheet="sheet"
@sendDialogValue="getDialogValue"
style="margin-bottom:50px;top:0"
/>
</v-dialog>
</v-card-actions>
</v-flex>
<v-dialog
v-model="dialogLogin"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<auth
@backToPage="getAuthDialogValue"
@closeModalFromLogin="closeModalFromLogin"
@closeModalFromRegistration="closeModalFromRegistration"
/>
<!-- <h1>asdfasfaf</h1> -->
<!-- <login/> -->
</v-dialog>
</v-layout>
</v-flex>
</v-layout>
</v-card>
</template>
<script>
import { mapActions, mapGetters, mapState } from "vuex";
import auth from "@/views/modalView/auth";
import mentorBookScheduling from "@/components/local/mentorProfileModal/mentorProfileModal.scheduling";
import mentorProfileModal from "@/views/modalView/mentorProfileModal";
export default {
components: {
auth,
mentorProfileModal,
mentorBookScheduling
},
//getting mentor data from connect view
props: ["mentor", "isLoggedIn"],
data: () => ({
dialog: false,
sheet: false,
dialogLogin: false
}),
computed: {
...mapState("authStore", ["isAuthenticate"]),
place() {
return this.mentor.mentoringPlaces.join(",");
}
},
methods: {
getImgUrl(img) {
try {
// console.log("image: ", img);
if (!img || img === undefined) {
return "";
} else {
return process.env.VUE_APP_ROOT_API + "static/" + img;
}
} catch (error) {
console.log(error);
}
},
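    // Gate the booking flow on authentication: open the scheduling
    // bottom sheet for logged-in users, otherwise show the auth modal.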
checkIsLoggedIn() {
if (this.isAuthenticate === true) {
this.sheet = true;
this.dialogLogin = false;
} else {
this.sheet = false;
this.dialogLogin = true;
}
},
getAuthDialogValue(val) {
// console.log(val);
this.dialogLogin = val;
// this.sheet = !val;
},
closeModalFromLogin(val) {
this.dialogLogin = val;
this.sheet = !val;
},
closeModalFromRegistration(val) {
this.dialogLogin = val;
this.sheet = !val;
},
//getting a value which is sending from mentorProfileModal
getDialogValue(valueFromChild) {
this.dialog = valueFromChild;
}
}
};
</script>
<style scoped>
@media screen and (min-width: 640px) {
.ifScreenSizeDesktop {
padding-left: 50px;
padding-right: 50px;
}
}
@media screen and (max-width: 640px) {
.ifScreenSizeDesktop {
padding-left: 0px;
padding-right: 0px;
}
}
.v-dialog__container {
display: -webkit-box !important;
vertical-align: middle;
}
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
width: 100%;
}
span {
font-size: 15px;
font-weight: 300;
}
</style>
<file_sep><template>
<v-app class="mainStyle">
<!-- navigation bar components -->
<navbar class="hidden-md-and-up" v-if="this.$router.currentRoute.path!='/connect-promo'" />
<navbarForDesktop
v-if=" this.$router.currentRoute.path!='/connect-promo'"
class="hidden-sm-and-down mb-5"
/>
<!-- mobile -->
<v-layout row wrap class="hidden-md-and-up">
<!-- {{userId}} -->
<v-content :class="[{'v-content-promo' : isPromoPage}]">
<router-view :isLoggedIn="isLoggedIn"></router-view>
</v-content>
</v-layout>
<BottomNav
:class="[{'v-content-promo' : isPromoPage}]"
class="hidden-md-and-up"
v-if=" this.$router.currentRoute.path!='/connect-promo'"
/>
<!-- <warningPage class="hidden-sm-and-down" /> -->
<v-layout row wrap class="hidden-sm-and-down mt-3" v-if="isDesktopView">
<v-flex md2 v-if="this.$router.currentRoute.path!='/connect-promo'"></v-flex>
<v-flex md8 xs12 px-1 v-if="this.$router.currentRoute.path!='/connect-promo'">
<router-view></router-view>
</v-flex>
<v-flex md12 xs12 px-1 v-if="this.$router.currentRoute.path==='/connect-promo'">
<v-content :class="[{'v-content-promo' : isPromoPage}]">
<router-view></router-view>
</v-content>
</v-flex>
<v-flex
md2
fill-height
class="right-sidebar hidden-sm-and-down"
v-if="this.$router.currentRoute.path!='/connect-promo'"
></v-flex>
<v-flex md2 v-if="this.$router.currentRoute.path ==='/connect-promo'"></v-flex>
</v-layout>
</v-app>
</template>
<script>
import warningPage from "@/views/pageView/desktopWarningPage.vue";
import { TokenService } from "@/service/storage.service";
// mobile
import informaticsForMobile from "@/components/local/home/mobile/home.informatics.mobile";
import steps from "@/components/local/home/mobile/home.steps";
//web
import leftVerticalTab from "@/components/global/web/global.leftside.tab";
import informaticsForWeb from "@/components/local/home/web/home.informatics.web";
import userShortInfoProfile from "@/components/local/home/web/home.user.profile";
import navbar from "@/components/global/navbar";
import features from "@/components/local/home/web/home.featurelist";
import BottomNav from "@/components/global/mobile/global.bottom.nav";
import navbarForDesktop from "@/components/global/web/navbarForDesktop.vue";
// api
// import { UserInfoService } from "@/service/storage.service";
import { mapActions, mapGetters, mapState } from "vuex";
import { UserInfoService } from "@/service/storage.service";
//data
export default {
name: "App",
components: {
navbarForDesktop,
warningPage,
features,
informaticsForMobile,
informaticsForWeb,
steps,
userShortInfoProfile,
leftVerticalTab,
BottomNav,
navbar
},
data: () => ({
color: "grey lighten-3",
// isTabViewValue: false,
isDesktopView: true,
height: 300,
tabs: [
{
text: "Connect",
icon: "map-marked-alt"
},
{
text: "Connect",
icon: "code-branch"
},
{
text: "Skill",
icon: "brain"
}
],
sliderColor: "#007790",
value: null,
verticalText: false,
model: "",
items: [
"Technology Companies",
"Garments Industries",
"Chemicel Industries"
]
}),
created() {
    // run only when not on the careerTest or career-promo routes
    // (the original `||` check was always true)
    if (
      this.$route.path != "/careerTest" &&
      this.$route.path != "/career-promo"
    ) {
      console.log("hello");
    } else {
      console.log("jello");
    }
// console.log('asdfdas',this.$route);
if (window.innerWidth > 900) {
this.isDesktopView = true;
} else {
this.isDesktopView = false;
}
// console.log('created hook called for app vue')
// console.log(this.$store.states.accessToken);
//sending request for userbasic info and session count
if (this.userId) {
this.fetchUserBasicInfoById({ user_id: this.userId }).catch(
function onError(error) {
console.log(error);
}
);
} else {
return null;
}
// this.fetchUserSessionCountById({ user_id: userId });
},
computed: {
isLoggedIn: function() {
return !!TokenService.getToken();
},
userId() {
// console.log(this.$vuetify.theme);
return UserInfoService.getUserID();
},
...mapGetters("authStore", ["getIsAuthenticate"]),
...mapGetters("industryStore", ["getIndustries"]),
...mapGetters("commonUserStore", [
"getUserBasicInfoById",
"getUserSessionCountById"
]),
// showTopBottom() {
// // console.log("name of route from computed: ", this.$route.path);
// var path = this.$route.path;
// if (path.includes("auth")) {
// return false;
// } else {
// return true;
// }
// },
isPromoPage() {
return this.$route.path === "/connect-promo";
}
},
methods: {
...mapActions("commonUserStore", [
"fetchUserBasicInfoById",
"fetchUserSessionCountById"
])
}
};
</script>
<style>
.mainStyle {
font-family: "Poppins", sans-serif;
}
p {
color: #606771;
}
span {
font-family: "Poppins", sans-serif;
color: #606771;
}
.v-dialog--fullscreen {
background-color: white;
}
.left-sidebar {
/* display: flex; */
position: sticky;
position: -webkit-sticky;
top: 0;
/* top:0; */
}
.right-sidebar {
padding-top: 25px;
position: sticky;
position: -webkit-sticky;
top: 0;
background-color: white;
}
.v-content-promo {
padding: 0px !important;
}
</style>
<file_sep><template>
<div>
<v-layout row wrap style="text-align:center;color:white;padding-top:100px">
<v-flex xs12>
<h3>Login</h3>
</v-flex>
<v-flex xs12 px-5 mt-5 my-3>
<v-layout row wrap>
<v-flex xs12 md6 offset-md3>
<v-text-field
label="Type a Username"
placeholder="Enter your username"
color="secondary"
dark
v-model="username"
></v-text-field>
</v-flex>
<v-flex>
<v-layout row wrap>
<v-flex xs12 offset-md3 md6>
<v-text-field
dark
v-model="password"
:append-icon="show2 ? 'visibility' : 'visibility_off'"
color="secondary"
:type="show2 ? 'text' : 'password'"
label="Password"
hint="At least 8 characters"
placeholder="Enter your password"
@click:append="show2 = !show2"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12 text-xs-center>
<v-btn id="user-login-arrow-btn" dark outline small @click="handleSubmit">Login</v-btn>
</v-flex>
</v-layout>
<h4 class="py-4">
            Don't have an account?
<v-btn id="user-register-btn" dark flat small white @click="gotoRegInfo">
<span style="text-decoration: underline; color:white;">Sign Up</span>
</v-btn>
</h4>
</v-flex>
</v-layout>
</v-flex>
<v-flex xs12>
<v-img
class="roadPosition"
:src="require('@/assets/authAbsolutePath.png')"
:max-width="350"
:max-height="250"
></v-img>
</v-flex>
</v-layout>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapGetters, mapActions } from "vuex";
export default {
data: () => ({
show2: false,
username: "",
password: "",
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
computed: {
...mapGetters("authStore", [
"authenticating",
"authenticationError",
"authenticationErrorCode"
])
},
methods: {
...mapActions("authStore", ["login"]),
handleSubmit() {
// Perform a simple validation that username and password have been typed in
if (this.username != "" && this.password != "") {
        const response = this.login({
          username: this.username.trim(),
          password: this.password
        });
response.then(res => {
if (!res) {
this.showAlert("Login Failed!", "error", "top");
} else {
if (typeof res.data != "undefined") {
if (
typeof res.data.success != "undefined" &&
res.data.success == true
) {
this.showAlert("Login success!", "success", "top");
this.$router.push(this.$route.query.redirect || "/connect");
const loginDialog = false;
this.$emit("loginDialog", loginDialog);
} else {
this.showAlert(res.data.message, "warning", "top");
}
} else {
//error caught in the authentication, so msg as it is returned from service through action
this.showAlert(res.message, "warning", "top");
}
}
});
this.username = "";
this.password = "";
}
},
gotoRegInfo() {
const showRegInfo = true;
this.$emit("showRegInfo", showRegInfo);
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<style scoped>
.input-group--focused > .primary--text {
caret-color: white !important;
color: white !important;
}
.curvyDiv {
/* display: absolute; */
flex: 1;
height: 100%;
/* background-color: # */
/* width:fit-content; */
/* background-image:url('../../../assets/authBackground.png') */
}
</style>
<file_sep><template>
<div>
<v-layout row wrap>
<v-flex xs6>
<v-btn bottom fab style="width: 50%;color:#007790" :icon="true" fixed color="#D9E4E6">Cancel</v-btn>
</v-flex>
<v-flex xs6>
<v-btn
bottom
fab
style="width: 50%;"
:icon="true"
@click="confirmSessionRequestFromMentor"
fixed
dark
color="#007790"
>Confirm</v-btn>
</v-flex>
</v-layout>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapActions, mapGetters } from "vuex";
export default {
props: ["session"],
data: () => ({
reviewedTime: "",
sessionModifiedStartTime: "",
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: "",
sessions: [],
hourlyRate: ""
}),
async created() {
try {
const response = await this.fetchMentorBasicDataById({
mentor_id: this.session.mentorId._id
});
this.hourlyRate = response[0].hourlyRate;
this.services = response[0].services;
} catch (error) {
console.log(error);
}
// console.log('hello',response[0].services);
},
mounted() {
// console.log(this.session);mentor_id
},
methods: {
//map which actions will need
...mapActions("mentorStore", ["fetchMentorBasicDataById"]),
...mapActions("sessionStore", ["saveAcceptRequestFromMentor"]),
async confirmSessionRequestFromMentor() {
// console.log("hello", this.session);
const requestReviewedTime = new Date().toISOString();
let updateData = {
sessionStatus: "Pending",
requestReviewedTime: requestReviewedTime,
sessionModifiedStartTime: this.session.sessionModifiedStartTime,
sessionModifiedSpot: this.session.sessionModifiedSpot,
serviceOffered: this.services,
paymentStatus: "Pending",
userId: this.session.userId._id,
mentorId: this.session.mentorId._id,
// sessionRequestedSpot: this.session.sessionRequestedSpot,
hourlyRate: this.hourlyRate,
updatedTime: requestReviewedTime
};
// console.log("row data", updateData);
try {
const response = await this.saveAcceptRequestFromMentor({
mentorAcceptSessionRequest: updateData,
sessionId: this.session._id
});
// console.log(response);
if (!response.data.success) {
this.showAlert(
"Failed!Check Your Internet Connection!",
"error",
"top"
);
} else {
this.showAlert(
"Successfully Accepted Mentee Request!",
"success",
"top"
);
this.$router.push(this.$route.query.redirect || "/sessionDashboard");
}
return response;
} catch (error) {
console.log(error);
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<style scoped>
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
/* width: 100%; */
}
</style>
<file_sep><template>
<!-- bottom nav -->
<v-card height="30" flat>
<v-bottom-nav :active.sync="bottomNav" :value="true" fixed shift>
<!-- @click="underConstructionDialog=true" -->
<v-btn color="grey" flat value="explore" small disabled>
<span class="titleText">Explore</span>
<v-avatar size="20">
<v-img :src="require('@/assets/explore.png')"></v-img>
</v-avatar>
</v-btn>
<!-- onclick change route name -->
<v-btn @click="navigateTo({ name: 'connect' })" small color="teal" flat value="connect">
<span class="titleText">Connect</span>
<v-avatar size="20">
<v-img :src="require('@/assets/connect.png')"></v-img>
</v-avatar>
</v-btn>
<!-- @click="navigateTo({ name: 'careerTest' })" -->
<v-btn
@click="navigateTo({ name: 'careerTest' })"
small
color="grey"
flat
value="careerTest"
>
<span class="titleText">Career Test</span>
<v-avatar size="20">
<v-img :src="require('@/assets/careerTest.png')"></v-img>
</v-avatar>
</v-btn>
</v-bottom-nav>
<v-dialog
v-model="underConstructionDialog"
transition="slide-x-transition"
style="background-color:#eee"
>
<pageNotReady />
</v-dialog>
<v-dialog
v-model="authenticationDialog"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<auth @backToPage="getAuthDialogValue" @closeModalFromLogin="closeModalFromLogin" />
<!-- <h1>asdfasfaf</h1> -->
<!-- <login/> -->
</v-dialog>
</v-card>
</template>
<script>
import pageNotReady from "@/components/global/global.PageNotReady.vue";
import { mapActions, mapGetters, mapState } from "vuex";
import auth from "@/views/modalView/auth";
export default {
props: ["isLoggedIn"],
components: {
auth,
pageNotReady
},
data: () => ({
underConstructionDialog: false,
bottomNav: "connect",
authenticationDialog: false
}),
computed: {
...mapState("authStore", ["isAuthenticate"])
},
methods: {
closeModalFromLogin(val) {
this.authenticationDialog = val;
},
getAuthDialogValue(val) {
this.authenticationDialog = val;
},
//navigate to route
navigateTo(route) {
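      // the career test requires login; unauthenticated users get the auth dialog instead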
// console.log("I am route", route.name);
if (this.isAuthenticate === false && route.name === "careerTest") {
this.authenticationDialog = true;
} else {
this.$router.push(route);
}
}
}
};
</script>
<file_sep><template>
<div style=" text-align:center ;background-color:white;" class="py-4">
<span>
<Strong style="color:#007799">Session Status :</Strong>
{{ getSessionDetails.sessionStatus }}
</span>
<br />
<span>
<Strong style="color:#007799">ID : </Strong>
{{ getSessionDetails._id.slice(0, 8) }}
</span>
</div>
</template>
<script>
export default {
props: ["getSessionDetails"],
async created() {},
mounted() {},
computed: {},
methods: {}
};
</script>
<style></style>
<file_sep>/* eslint-disable no-console */
/* Mentee-related services.
These are only for mentee-specific operations;
services common to all users live in user.service.js.
*/
import ApiService from "./api.service";
const menteeService = {
// get mentee review of a session (api no 10)
async fetchMenteeReviewOfSession(mentee_id, session_id) {
try {
const response = await ApiService.get(
"user-sessions?id=" +
mentee_id +
"&operation=Reviews (id= " +
session_id +
")"
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
// get all review come from mentors against a mentee (api no 11)
async fetchAllReviewsFromMentorsAgainstMentee(mentee_id) {
try {
const response = await ApiService.get(
"user-sessions?userId=" +
mentee_id +
"&operation=UserReviewsFromMentors"
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
async fetchAllReviewsFromMenteeAgainstMentor(mentee_id) {
try {
const response = await ApiService.get(
"user-sessions?userId=" + mentee_id + "&operation=UserReviews"
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
}
};
export default menteeService;
<file_sep><template>
<div>
<v-layout row wrap>
<h4>Add Skills</h4>
<v-flex xs12>
<v-select
attach
chips
label="Add Skills"
multiple
solo
class="px-2 py-2 autoComplete"
v-model="userDetails.skills"
:items="skills"
item-text="name"
item-value="_id"
></v-select>
</v-flex>
<h4>Add Top Skills</h4>
<v-flex xs12>
<v-select
chips
label="Top Skills"
multiple
solo
class="px-2 py-2 autoComplete"
v-model="userDetails.topSkills"
:items="skills"
item-text="name"
item-value="_id"
></v-select>
</v-flex>
<v-flex xs12>
<v-btn color="#007799" dark @click="updateProfile()">Update</v-btn>
</v-flex>
</v-layout>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapActions } from "vuex";
import { UserInfoService } from "@/service/storage.service";
export default {
data: () => ({
userId: UserInfoService.getUserID(),
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
props: ["userDetails", "skills"],
methods: {
...mapActions("commonUserStore", ["saveUpdateProfile"]),
async updateProfile() {
this.dynamicComponent = true;
// console.log("asdf");
let userData = {
topSkills: this.userDetails.topSkills,
skills: this.userDetails.skills
};
try {
const response = await this.saveUpdateProfile({
user_data: userData,
user_id: this.userId
});
// console.log("check my response ", response);
if (!response) {
this.showAlert("Profile Update Failed!", "error", "top");
} else {
this.showAlert("Profile Update Successful!", "success", "top");
var getDialogValue = false;
this.$emit("setDialogValue", getDialogValue);
}
return response;
} catch (error) {
console.log(error);
return null;
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<file_sep>import { state } from "./base.state";
import { getters } from "./base.getters";
import { actions } from "./base.actions";
import { mutations } from "./base.mutations";
export const baseStore = {
namespaced: true,
state,
getters,
actions,
mutations
};
<file_sep><template>
<v-card style="background-color:#eeee;height:100%" flat class="pb-3">
<!-- tab container .here tab text and tabs become vertical-->
    <!-- TODO: needs a day to fix this properly, or replace it with another component -->
<div
:style="containerStyle"
style="background-color:#eeee"
class="vertical-tabs centerItem my-2"
:class="{
'vertical-tabs--vertical-text': verticalText,
'vertical-tabs--horizontal-text': !verticalText
}"
>
<v-tabs
v-model="active"
:show-arrows="verticalText"
:color="color"
:slider-color="sliderColor"
:style="tabsStyle"
>
<!-- divider -->
<div style="margin-right:10px;margin-left:4px">
<v-tab @click="navigateTo({ name: 'connect' })">
<font-awesome-icon size="2x" icon="code-branch" class="mx-4" />
<span>Connect</span>
</v-tab>
</div>
<v-divider vertical light class="mr-2"></v-divider>
<v-divider vertical light class="ml-3"></v-divider>
<div style="margin-right:10px;margin-left:4px">
<v-tab @click="navigateTo({ name: 'psychometricTest' })">
<font-awesome-icon size="2x" icon="brain" class="mx-4" />
<span>Career Test</span>
</v-tab>
</div>
<v-divider vertical light class="ml-3"></v-divider>
<div style="margin-right:10px;margin-left:4px">
<v-tab disabled>
<font-awesome-icon size="2x" icon="map-marked-alt" class="mx-4" />
          <span>Opportunity</span>
</v-tab>
</div>
<v-divider vertical light class="mr-3"></v-divider>
</v-tabs>
</div>
<!-- leftside bar footer -->
<v-layout row wrap style="text-align:center" pb-5>
<v-flex>
<span style="font-size:12px">About</span>
</v-flex>
<v-flex>
<span style="font-size:12px">Policy</span>
</v-flex>
<v-flex>
<span style="font-size:12px">Contact</span>
</v-flex>
<v-flex>
<span style="font-size:12px">FAQ</span>
</v-flex>
</v-layout>
</v-card>
</template>
<script>
export default {
name: "vertical-tabs",
props: {
color: String,
height: {
type: [Number, String],
default: 300
},
items: Array,
sliderColor: String,
value: "",
verticalText: Boolean
},
data: () => ({
active: ""
}),
methods: {
//navigate method
navigateTo(route) {
this.$router.push(route);
// console.log("hello");
}
},
computed: {
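    // with vertical text the rotated tabs need an explicit height;
    // with horizontal text the height is derived from the item count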
containerStyle() {
return this.verticalText
? {
height: isNaN(this.height) ? this.height : `${this.height}px`
}
: {
height: 108 * this.items.length + "px"
};
},
tabsStyle() {
return this.verticalText
? {
width: isNaN(this.height) ? this.height : `${this.height}px`
}
: {};
}
}
};
</script>
<style scoped>
hr {
border-top: 0.5px solid #eeee !important;
}
.v-tabs__slider-wrapper {
bottom: 10px;
}
.vertical-tabs {
/* overflow: hidden; */
height: 400px;
}
.vertical-tabs--horizontal-text .v-tabs {
transform: rotate(90deg);
transform-origin: 100px 100px;
height: 200px;
}
.vertical-tabs--horizontal-text .v-tabs >>> .v-tabs__container {
height: 200px;
width: 200px;
text-align: center;
/* padding: 30px; */
}
.vertical-tabs--horizontal-text .v-tabs >>> .v-tabs__div {
width: 35px;
height: 200px;
display: inline-block;
}
.vertical-tabs--horizontal-text .v-tabs >>> .v-tabs__item {
transform: rotate(-90deg);
transform-origin: 100px 100px;
width: 200px;
height: 35px;
display: block;
text-align: left;
line-height: 36px;
white-space: pre;
overflow: hidden;
text-overflow: ellipsis;
}
.vertical-tabs--vertical-text {
width: 35px;
}
.vertical-tabs--vertical-text .v-tabs {
transform: rotate(90deg);
transform-origin: 24px 24px;
}
.vertical-tabs--vertical-text .v-tabs >>> .v-tabs__item {
transform: rotate(180deg);
}
.vertical-tabs--vertical-text .v-tabs >>> .v-tabs__slider-wrapper {
top: 0;
bottom: auto;
}
</style>
<file_sep><template>
<!-- this modal shows user review details (rating, comments) -->
<div style="background-color:#eee">
<!-- mobile layout -->
<div style="background-color:white;" >
<v-layout row wrap class="">
<v-btn icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
<p style="margin-top:15px"> <strong>Rating & Reviews</strong> </p>
</v-layout>
</div>
<rating class="mb-1 " :allRatingAverage="allRatingAverage"/>
<reviews :reviewRating="reviewArray"/>
</div>
</template>
<script>
import rating from '@/components/local/rating&Reviews/rating&Reviews.ratings.vue'
import reviews from '@/components/local/rating&Reviews/rating&Reviews.reviews.vue'
export default {
props:['reviewArray','allRatingAverage'],
data: () => ({
reviewRatingData: [
{
averageRating: 3.5,
experienceAverage: 4,
communicationAverage: 3.5,
friendlynessAverage: 3.5,
}
]
}),
components:{
rating,reviews
},
methods: {
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
}
}
};
</script><file_sep><template>
<div style="background-color:#D9E4E6;padding:20px 20px 0px 20px">
<div class="topBorderRound" style="height:-webkit-fill-available;background-color:white">
<v-img
class="topBorderRound"
:aspect-ratio="3"
:src="require('@/assets/images/skillTestBackImage.jpg')"
>
<div style="text-align:right">
<v-btn style="text-align:right" icon @click="emitCloseDialog">
<v-icon>clear</v-icon>
</v-btn>
</div>
</v-img>
<div class="cardCarrierDiv" style=" ">
<v-card class="questionCardDesign" v-for="question in questions" :key='question.id'>
<span>
<Strong style="color:#007799">{{ question.id < 10 ? 0 : '' }}{{question.id}} .</Strong>
</span>
<span>
<Strong>{{question.question}}</Strong>
</span>
<v-layout row wrap>
<v-flex xs12 v-for="(option,i) in question.options" :key="i">
<v-btn-toggle
v-model="selectedOption"
style="margin:5px; border-radius: 50px;height: 40px;width: 40px;"
>
<v-btn
style=" border-radius: 50px;height: 40px;width: 40px; "
:value="option.option"
@click="optionClicked(question, option.option)"
>{{option.no}}</v-btn>
</v-btn-toggle>
{{option.option}}
</v-flex>
<ul style="list-style-type: none;">
<!-- for each response of the current question -->
<li v-for="(option,i) in question.options" :key="i" style="position: relative; margin: 8px 5px;">
<label style="padding: 0px 12px;">
<input type="radio" v-bind:value="response.value"
v-bind:name="index"
v-model="userResponses[index]"
@click="showResponse"
style="margin-top: 5px;"
> {{response.value}}
<span class="checkmark"></span>
</label>
</li>
</ul>
</v-layout>
</v-card>
</div>
<div class="timeBtnStyle">
<div class="keepCenter">
<v-btn @click="dialog=true" small icon>
<v-icon color="white">open_in_new</v-icon>
</v-btn>
<span style="color:white;">{{answeredQuestionCount}}/{{totalQuestionCount}}</span>
<v-layout row wrap>
<v-flex>
<v-icon color="white">alarm</v-icon>
</v-flex>
<v-flex>
<span style="color:yellow; ">{{timeRemaining}}</span>
</v-flex>
</v-layout>
</div>
</div>
</div>
<v-dialog v-model="dialog" fullscreen hide-overlay transition="dialog-bottom-transition">
<unAnsweredList @closeUnanswered="dialog = false" />
</v-dialog>
</div>
</template>
<script>
import unAnsweredList from "@/views/modalView/unAnsweredQueListModal.vue";
import {FormatUtil} from "@/service/utility.service.js"
import {mapActions} from "vuex"
export default {
components: {
unAnsweredList
},
props: [
'testId'
],
data: () => ({
selectedOption: "",
dialog: false,
questionNo: 1,
answeredQuestionCount: 0,
totalQuestionCount: 10,
questions: [],
rawQuestions: [],
}),
computed: {
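    // TODO: the countdown is not wired up yet; timeRemaining returns a static placeholder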
timeElapsed() {},
timeRemaining() {
return "00:00:30";
}
},
methods: {
emitCloseDialog() {
this.$emit("closeTestModal");
},
...mapActions('skillTestStore',['fetchAllQuestion']),
optionClicked: function(question, option){
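      // TODO: persist the selected answer; for now the selection is only logged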
console.log(question.id);
console.log(option);
}
},
created(){
this.fetchAllQuestion({testId:this.testId})
.then((res)=>{
this.rawQuestions = res;
this.questions = FormatUtil.formatQuestions(res);
console.log(this.questions);
console.log(res);
})
},
watch: {
'selectedOption': function(){
console.log(this.selectedOption);
}
}
};
</script>
<style scoped>
.v-btn-toggle {
color: black;
}
.theme--light.v-btn-toggle--selected {
background-color: #f4c050;
color: white;
}
.theme--light.v-btn:not(.v-btn--icon):not(.v-btn--flat) {
background-color: transparent;
}
.keepCenter {
font-size: 16px;
color: white;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
}
.timeBtnStyle {
background-color: #007799;
position: fixed;
bottom: 0;
left: 50%;
border-top-right-radius: 100px;
border-top-left-radius: 100px;
margin-left: -90px;
width: 180px;
height: 90px;
}
.cardCarrierDiv {
min-height: 100px;
width: 300px;
position: absolute;
left: 50%;
margin-left: -150px;
top: 7%;
}
.questionCardDesign {
padding: 10px;
background-color: #d9e4e6;
border-radius: 10px;
margin-bottom: 10px;
}
.topBorderRound {
position: relative;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
overflow: auto;
}
.buttonStyle {
color: blue !important;
}
.selected {
background-color: red;
}
</style><file_sep><template>
<div class="mb-2">
<v-card>
<v-layout row wrap>
<v-flex xs4>
<v-img :aspect-ratio="1" :src="imageUrl(test.image)"></v-img>
</v-flex>
<v-flex xs7 style="padding:10px">
<span>{{test.title}}</span>
<br />
<span class="subText">Publisher :</span>
<span class="subText">
<strong>{{test.publisherId.name}}</strong>
</span>
<br />
<span class="subText">Price :</span>
<span class="subText">
<strong>{{test.price}}</strong>
</span>
<v-layout row wrap style="color:#007790;">
<v-flex xs1>
<span class="subText">
<strong>{{test.rating}}</strong>
</span>
</v-flex>
<v-flex xs7>
<v-rating
readonly
dense
background-color="black lighten-3"
color="black"
size="10"
v-model="rating"
></v-rating>
</v-flex>
</v-layout>
</v-flex>
</v-layout>
</v-card>
</div>
</template>
<script>
export default {
props: ["test"],
data: () => ({
rating: 3
}),
created() {
this.rating = this.test.rating;
},
computed: {},
methods: {
imageUrl(image) {
if (!image || image === undefined) {
return "https://placehold.it/550x300&text=Test Image";
}
return process.env.VUE_APP_ROOT_API + "/static/" + image;
}
}
};
</script><file_sep><template>
<!-- this component is alternative of bogus vertical tab bar.if that fails it will use -->
<v-card
style="align-items:center;background-color:#eeee;height:100%"
flat
class="pb-3"
>
<v-layout row wrap>
<v-divider></v-divider>
<v-flex xs12 py-2>
<v-btn flat color="indigo" to="/">
<font-awesome-icon
size="2x"
icon="map-marked-alt"
class="mx-4"
/>Opportunity
</v-btn>
</v-flex>
<v-divider></v-divider>
<v-flex xs12 py-2>
<v-btn flat color="indigo" class="active-button teal" to="/connect">
<font-awesome-icon size="2x" icon="code-branch" class="mx-4" />Connect
</v-btn>
</v-flex>
<v-flex xs12 py-2>
<v-btn flat color="indigo">
<font-awesome-icon size="2x" icon="brain" class="mx-4" />Skill
</v-btn>
</v-flex>
</v-layout>
</v-card>
</template>
<style>
.btn-toggle {
flex-direction: column;
}
</style>
<file_sep><template>
  <!-- this modal is for the mentee: shown when
  they find a mentor on the connect page
  and click the mentor card to get
  details about the mentor -->
<div style="background-color:#eee">
<!-- mobile layout -->
<div style="background-color:white;" class="mb-1">
<v-layout row wrap class="modalHeader">
<v-btn icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-layout>
<basic :session="session"/>
</div>
<div class="mb-1">
<info :session="session"/>
</div>
<div class="mb-1" @click="showRatingQuestionsTab">
<ratingCallToAction :session="session" v-if="tabNumber!==1"/>
</div>
<div class="mb-5">
<sessionHistoryTabs
:mode="mode"
:tabNumber="tabNumber"
class="mb-2"
@selectedTabNumber="selectedTabNumber"
:session="session"
/>
</div>
</div>
</template>
<script>
import basic from "@/components/local/completeSessionDetailsModal/completeSessionDetailsModal.basic";
import info from "@/components/global/mobile/global.sessionDetails.info";
import ratingCallToAction from "@/components/local/completeSessionDetailsModal/completeSessionDetailsModal.rateCallToAction";
import sessionHistoryTabs from "@/components/local/completeSessionDetailsModal/completeSessionDetailsModal.sessionRating&History";
import { mapActions, mapGetters } from "vuex";
export default {
props: ["session","mode"],
data: () => ({
tabNumber: 0,
noConversation: true,
reviewedTime: "",
sessionModifiedStartTime: "",
snackbar: false,
color: "error",
timeout: 4000,
snackbartext: "",
y: "",
sessions: [],
hourlyRate: ""
}),
components: {
basic,
info,
ratingCallToAction,
sessionHistoryTabs
},
async created() {
// try {
// const response = await this.fetchMentorBasicDataById({
// mentor_id: this.session.mentorId._id
// });
// this.hourlyRate = response[0].hourlyRate;
// this.services = response[0].services;
// } catch (error) {
// console.log(error);
// }
// console.log('hello',response[0].services);
},
watch: {},
methods: {
//map which actions will need
...mapActions("mentorStore", ["fetchMentorBasicDataById"]),
//complete session alert
showRatingQuestionsTab() {
this.tabNumber = 1;
console.log("hello");
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
},
//back button emiting parent to close modal
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
},
selectedTabNumber(val) {
this.tabNumber = val;
// console.log(this.tabNumber)
}
}
};
</script>
<style>
.modalHeader {
height: 80px;
background-color: #eee;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep>/* eslint-disable no-console */
// common services for all users(mentor, mentee both)
import ApiService from "./api.service";
const commonUserService = {
// fetch all usersdata
async fetchAllUser() {
try {
const response = await ApiService.get("/users");
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
//users details info by id(api no 6.1)
async fetchUserDetailInfoById(user_id) {
// console.log(" user id from safsaf service" + user_id);
try {
const response = await ApiService.get(
"users?user_id=" + user_id + "&operation=DetailUserData "
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
// user's basic info by id (api no 6)
async fetchUserBasicInfoById(user_id) {
// console.log("loggedin user id from service" + user_id);
try {
const response = await ApiService.get(
"users?user_id=" + user_id + "&operation=BasicUserData "
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
// summary count of sessions of a user (api no 7)
async fetchUserSessionCountById(user_id) {
try {
const response = await ApiService.get(
"user-sessions?userId=" + user_id + "&operation=Summary "
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
//to get unseen messages of a user, to get all : do not pass status (api no 15)
async fetchAllMessages(user_id) {
try {
// var unseen;
const response = await ApiService.get(
"messages?userId=" +
user_id +
"&status=&sortby=sentTime&order=desc&limit=5&page=1"
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
  //to get notifications of a user; passing status = "all" returns all of them (api no 16)
  async fetchAllNotifications(user_id) {
    try {
      const response = await ApiService.get(
        "notifications?user_id=" +
          user_id +
          "&status=all&sortby=createdTime&order=desc&limit=5&page=1"
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
async fetchAllSkills() {
try {
// var unseen;
const response = await ApiService.get("/skills");
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
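  // update a user's profile document via PUT /users/:id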
async saveUpdateProfile(user_data, user_id) {
try {
const response = await ApiService.put("/users/" + user_id, user_data);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
}
};
export default commonUserService;
<file_sep><template>
<v-layout row wrap px-2 py-1 style="background-color:white">
<v-flex xs12>
<h4 style="color:#007799">Skills</h4>
</v-flex>
<v-flex xs12>
<div style="text-align:center">
<v-chip v-for="(skill, i) in profileData.skills" :key="i">{{
skill.name
}}</v-chip>
</div>
</v-flex>
</v-layout>
</template>
<script>
export default {
props: ["profileData"],
mounted() {
// console.log(this.profileData);
}
};
</script>
<file_sep>export const getters = {
appTitle: state => {
return state.appTitle;
},
userID: state => {
return state.userID;
},
loading: state => {
return state.loading;
}
};
<file_sep>/* eslint-disable no-console */
// common services for all users(mentor, mentee both)
import ApiService from "./api.service";
const SkillTestService = {
async fetchAllQuestion(testId) {
try {
const response = await ApiService.get(
"/skill-test-questions?testId=" + testId + "&operation=TestQuestions"
);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
async fetchAllTests(type) {
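    // when an operation type is supplied, ask the server to filter; otherwise fetch every test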
try {
let response = null;
if (type) {
response = await ApiService.get("/skill-tests?operation=" + type);
// console.log(response);
} else {
response = await ApiService.get("/skill-tests");
}
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
async saveResult(result) {
try {
const response = await ApiService.post("/skill-test-results", result);
// console.log(response);
return response.data.data;
} catch (error) {
// console.log(error.response);
}
},
async fetchPopularTopics() {
try {
const response = await ApiService.get(
"skill-tests?operation=PopularTopics"
);
return response.data.data;
} catch (error) {
return error;
}
},
async fetchAuthorizers() {
try {
const response = await ApiService.get(
"skill-tests?operation=Authorizers"
);
return response.data.data;
} catch (error) {
console.log(error);
}
},
async fetchAllTopics() {
try {
const response = await ApiService.get("skill-test-topics");
return response.data.data;
} catch (error) {
console.log(error);
}
}
};
export default SkillTestService;
<file_sep>export const state = {
authenticating: false,
accessToken: "",
authenticationErrorCode: 0,
authenticationError: "",
refreshTokenPromise: null,
isAuthenticate: false
};
<file_sep>/* eslint-disable no-console */
import CommonUserService from "../../../service/commonUser.service";
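// Vuex actions for user data shared by mentors and mentees: each action delegates
// to CommonUserService, commits the result into module state, and also returns it
// so callers can await the payload directly.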
export const actions = {
  // user's detailed info by id. TODO: api 3 according to Postman, but not in the Discord docs.
async fetchUserDetailInfoById({ commit }, { user_id }) {
// console.log("loggedin user id from action" + user_id);
try {
const response = await CommonUserService.fetchUserDetailInfoById(user_id);
commit("saveUserDetailInfoById", response);
return response;
} catch (error) {
console.log(error);
}
},
// user's basic info by id
async fetchUserBasicInfoById({ commit }, { user_id }) {
// console.log("loggedin user id from action" + user_id);
try {
const response = await CommonUserService.fetchUserBasicInfoById(user_id);
commit("saveUserBasicInfoById", response);
return response;
} catch (error) {
console.log(error);
}
},
// summary count of sessions of a user
async fetchUserSessionCountById({ commit }, { user_id }) {
try {
const response = await CommonUserService.fetchUserSessionCountById(
user_id
);
commit("saveUserSessionCountById", response);
return response;
} catch (error) {
console.log(error);
}
},
// get all user messages
async fetchAllMessages({ commit }, { user_id }) {
try {
const response = await CommonUserService.fetchAllMessages(user_id);
commit("saveAllMessages", response);
return response;
} catch (error) {
console.log(error);
}
},
// get all user notification
async fetchAllNotifications({ commit }, { user_id }) {
try {
const response = await CommonUserService.fetchAllNotifications(user_id);
commit("saveAllNotifications", response);
return response;
} catch (error) {
console.log(error);
}
},
async fetchAllSkills({ commit }) {
try {
const response = await CommonUserService.fetchAllSkills();
commit("saveAllSkills", response);
return response;
} catch (error) {
console.log(error);
}
},
async saveUpdateProfile({ commit }, { user_data, user_id }) {
try {
const response = await CommonUserService.saveUpdateProfile(
user_data,
user_id
);
commit("saveUserBasicInfoById", response);
return response;
} catch (error) {
console.log(error);
}
}
};
<file_sep><template>
<v-card class="elevation-0">
<v-list two-line>
<v-list-tile v-for="item in itemsComingSoon" :key="item.title" avatar>
<v-list-tile-content>
<span class="titleText">{{ item.title }}</span>
<v-list-tile-sub-title class="subText" v-for="(item, i) in item.subtitle" :key="i">
{{ item }}
<br />
</v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</v-list>
</v-card>
</template>
<script>
import { mapActions, mapGetters } from "vuex";
export default {
data: () => ({
//informatics data for mobile
itemsComingSoon: [
{
icon: "assignment",
iconClass: "blue white--text",
title: "Popular Careers",
subtitle: ["coming soon"]
},
{
icon: "call_to_action",
iconClass: "amber white--text",
title: "Average Salary/month",
subtitle: ["coming soon"]
}
],
itemsDemo: [
{
icon: "assignment",
iconClass: "blue white--text",
title: "Popular Careers",
subtitle: ["UI/UX Development", "Data Science", "Block Chain etc"]
},
{
icon: "call_to_action",
iconClass: "amber white--text",
title: "Average Salary/month",
subtitle: [
"UI/UX Development(1500$)",
"Data Science(2000$)",
"Block Chain(2323$)"
]
}
]
}),
async created() {
var industryId = 1;
var industry_id_array = [{ industryId }];
// this.fetchSalariesOfIndustry({ industry_id: industryId });
// this.fetchCareerListOfIndustries({industry_id_array: [{industryId}]});
},
computed: {
// ...mapGetters("industryStore", [
// "getCareerListOfIndustries",
// "getSalariesOfIndustry"
// ])
},
methods: {
// ...mapActions("industryStore", [
// "fetchCareerListOfIndustries",
// "fetchSalariesOfIndustry"
// ])
}
};
</script>
<style>
@media screen and (max-width: 375px), screen and (max-height: 667px) {
.v-list__tile--avatar {
height: 38px;
}
}
</style>
<file_sep><template>
<div>
<v-layout row wrap>
<v-flex py-2 px-2>
<v-img :src="require('@/assets/icons/common/careerKiLogo.png')"></v-img>
</v-flex>
<v-flex style="justify-content:flex-end;display:flex">
<v-btn flat icon @click="stopdrawer">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-flex>
</v-layout>
<v-list class="pa-1" v-if="isAuthenticate">
<v-list-tile avatar @click="$router.push({ name: 'userProfile' })">
<v-list-tile-avatar size="40" color="#007799">
<v-icon dark>person</v-icon>
</v-list-tile-avatar>
<!-- <v-list-tile-avatar>
<img :src="require('@/assets/my.jpg')">
</v-list-tile-avatar>-->
<v-list-tile-content>
<v-list-tile-title>{{ (name!=undefined)?name:username }}</v-list-tile-title>
<v-list-tile-sub-title>View Profile</v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</v-list>
<v-btn block class="elevation-0" v-if="showMe">
<span style="font-size:13px;font-weight:400">CareerKi professional</span>
</v-btn>
<v-list v-if="isAuthenticate">
<v-list-tile v-for="item in items1" :key="item.title">
<v-list-tile-action v-if="showMe">
<v-icon>{{ item.icon }}</v-icon>
</v-list-tile-action>
<v-list-tile-content>
<v-list-tile-sub-title @click="navigateTo(item.route)">
{{
item.title
}}
</v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</v-list>
<v-list>
<v-divider></v-divider>
<v-list-tile
v-for="item in items2"
:key="item.title"
@click="$router.push({ name: item.route })"
>
<v-list-tile-action v-if="showMe">
<v-icon>{{ item.icon }}</v-icon>
</v-list-tile-action>
<v-list-tile-content>
<v-list-tile-sub-title>{{ item.title }}</v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</v-list>
<v-divider></v-divider>
<v-list>
<v-list-tile>
<v-list-tile-content>
<v-list-tile-sub-title v-if="!isAuthenticate" @click="dialogLogin=true">Login</v-list-tile-sub-title>
<!-- <v-list-tile-sub-title v-if="!isAuthenticate">Sign Up</v-list-tile-sub-title> -->
<v-list-tile-sub-title v-if="isAuthenticate" @click="logoutUser()">Log Out</v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</v-list>
<div>
<v-dialog
v-model="dialogLogin"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<auth @backToPage="getAuthDialogValue" @closeModalFromLogin="closeModalFromLogin" />
<!-- <h1>asdfasfaf</h1> -->
<!-- <login/> -->
</v-dialog>
</div>
<!-- <div style="justify-content: center;display: flex;" v-if="isAuthenticate">
<v-btn class="elevation-0" style="border-radius:25px;">
<v-icon>logout</v-icon>Log out
</v-btn>
</div>-->
</div>
</template>
<script>
//api
// import store from "@/store";
import { mapActions, mapGetters, mapState } from "vuex";
import { UserInfoService } from "@/service/storage.service";
import auth from "@/views/modalView/auth";
export default {
components: {
auth
},
props: ["drawer", "isLoggedIn"],
data() {
return {
dialogLogin: false,
userId: UserInfoService.getUserID(),
name: "",
logValue: false,
loggingDataValue: "",
username: "",
showMe: false,
userBasic: [],
items1: [
{
title: "My sessions",
route: "sessionDashboard",
icon: "dashboard",
isdisable: false
},
{
title: "My tests",
route: "",
icon: "question_answer",
isdisable: true
},
{
title: "My career prescription",
route: "",
icon: "dashboard",
isdisable: true
},
{
title: "My training & education",
route: "",
icon: "question_answer",
isdisable: true
}
],
items2: [
{
title: "About us",
route: "",
icon: "question_answer",
isdisable: true
},
{ title: "Contact us", route: "", icon: "dashboard", isdisable: true },
{
title: "Policies",
route: "",
icon: "question_answer",
isdisable: true
},
{ title: "FAQ's", route: "", icon: "question_answer", isdisable: true }
]
};
},
mounted() {
// console.log("my user id " + this.userId);
},
async created() {
// const isAuthed = this.isAuthenticate;
// console.log(this.isAuthenticate);
if (this.userId === null) {
return null;
} else {
this.userBasic = await this.fetchUserBasicInfoById({
user_id: this.userId
});
this.name = this.userBasic[0].name;
this.username = this.userBasic[0].userName;
console.log(this.username);
console.log(this.name);
return this.userBasic;
}
// console.log(this.userBasic);
},
computed: {
...mapState("authStore", ["isAuthenticate"])
// ...mapState('authStore',{isLogger: state => state.isAuthenticate}),
// this.$store.watch((state)=> state.authStore.isAuthenticate);
},
watch: {
userId() {
console.log(this.userId);
}
},
methods: {
logoutUser() {
this.logout();
},
//logout function called directly from action
    //no method creation needed
...mapActions("authStore", ["logout"]),
...mapActions("commonUserStore", ["fetchUserBasicInfoById"]),
// navigate to Route and close sidenavbar
navigateTo(route) {
this.$router.push({ name: route });
this.stopdrawer();
},
//close sidenavbar
stopdrawer() {
const stopdrawer = false;
this.$emit("drawer", stopdrawer);
},
closeModalFromLogin(val) {
this.dialogLogin = val;
},
getAuthDialogValue(val) {
this.dialogLogin = val;
}
}
};
</script>
<style></style>
<file_sep>export const mutations = {
saveQuestions: (state, payload) => {
state.questions = payload;
},
saveTest: (state, payload) => {
state.test = payload;
},
saveTestDetails: (state, payload) => {
state.testDetails = payload;
},
saveSkillTests: (state, payload) => {
state.skillTests = payload;
},
savePopularTests: (state, payload) => {
state.popularTests = payload;
},
saveSuggestedTests: (state, payload) => {
state.suggestedTests = payload;
},
saveResult: (state, payload) => {
state.result = payload;
},
saveAuthorizers: (state, payload) => {
state.authorizers = payload;
}
};
<file_sep>//this is the main landing page of my profile,skill test listing & psychometric test tabs
<template>
<div>
<v-toolbar color="#007799" dark tabs class="modalHeaderForSkillTestHome">
<template v-slot:extension>
<v-tabs v-model="model" centered color="#007799">
<v-tabs-slider color="#ffae42" style="height:3px"></v-tabs-slider>
<v-tab v-for="(tab,i) in tabs" :key="i" :href="`#tab-${i}`">
<span style="color:white;text-transform: capitalize;">{{ tab }}</span>
</v-tab>
</v-tabs>
</template>
</v-toolbar>
<v-tabs-items v-model="model" touchless>
<v-tab-item :value="`tab-0`">
<skillTestProfile />
</v-tab-item>
<v-tab-item :value="`tab-1`">
<v-card flat>
<skillTestList />
</v-card>
</v-tab-item>
<v-tab-item :value="`tab-2`">
<!-- <v-card flat></v-card> -->
<!-- <careerTest/> -->
<v-layout row wrap>
<v-flex
style=" display:flex;justify-content:center;align-items:center;flex-direction:column;margin-top:200px"
>
<v-btn
style="color:white;height:100px;width:100px;"
round
color="#007799"
to="/psychometricTest"
>Start test</v-btn>
</v-flex>
</v-layout>
</v-tab-item>
</v-tabs-items>
</div>
</template>
<script>
import careerTest from "@/views/pageView/careerTest.vue";
import skillTestProfile from "@/views/pageView/skillTestProfile.vue";
import skillTestList from "@/views/pageView/skillTestListing.vue";
export default {
components: {
skillTestList,
skillTestProfile,
careerTest
},
data: () => ({
tabs: ["Test Profile", "Skill Test", "Personality Test"],
model: "tab-1"
}),
created() {
console.log(this.$route.path);
}
};
</script>
<style>
.modalHeaderForSkillTestHome {
height: 105px;
background-color: #007799;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep>export const mutations = {
saveSessionDetails: (state, payload) => {
state.sessionDetails = payload;
},
saveActiveSession: (state, payload) => {
state.activeSession = payload;
},
savePendingSession: (state, payload) => {
state.pendingSession = payload;
},
saveCompleteSession: (state, payload) => {
state.completeSession = payload;
},
saveSessionDashboardSummery: (state, payload) => {
state.sessionDashboardSummery = payload;
},
saveSessionListById: (state, payload) => {
state.sessionListById = payload;
}
};
<file_sep><template>
<div class="pt-2" style="margin-bottom: 50px;">
<sessionSummaryBox />
<v-layout wrap align-center px-5>
<v-flex xs12>
<v-select
:items="items"
solo
flat
dense
:height="10"
item-text="text"
item-value="value"
v-model="mode"
:label="selecDashboardView"
></v-select>
</v-flex>
</v-layout>
<sessionListTab :sessionData="sessionData" @setType="getType" :mode="mode"/>
<div
v-if="sessionData.length === 0 && !noDataAvailable"
style="background-color:white;text-align:center"
>
<v-progress-circular indeterminate color="#007799" class="bottom"/>
</div>
<v-card
v-if="noDataAvailable"
flat
style="height:300px;padding:30px;display: flex;
justify-content: center;
flex-direction: column;
text-align: center;"
>
<v-card-text>
<p>No Data Available</p>
</v-card-text>
</v-card>
</div>
</template>
<script>
import { UserInfoService } from "@/service/storage.service";
import sessionSummaryBox from "@/components/local/sessionDashboard/sessionDashboard.sessionSummaryBox";
import sessionListTab from "@/components/local/sessionDashboard/sessionDashboard.sessionListTab";
import data from "@/data/data.session.json";
import { mapActions, mapGetters } from "vuex";
import { setTimeout } from "timers";
export default {
data: () => ({
noDataAvailable: false,
sessionListArrayForCheck: null,
pageNo: 1,
bottom: false,
userId: UserInfoService.getUserID(),
mode: "User",
type: "Pending",
    selectDashboardView: "View as professionals",
sessionData: [],
items: [
{ value: "Mentor", text: "View as professionals" },
{ value: "User", text: "View as mentee" }
]
}),
components: {
sessionSummaryBox,
sessionListTab
},
async created() {
window.addEventListener("scroll", () => {
this.bottom = this.bottomVisible();
});
await this.addSessionList(this.pageNo).then(() => {
// console.log();
setTimeout(() => {
if (this.sessionListArrayForCheck.length === 0) {
this.noDataAvailable = true;
// console.log("hello everyone");
} else {
this.noDataAvailable = false;
}
}, 4000);
});
},
watch: {
async mode() {
this.pageNo = 1;
await this.fetchSessionListById({
user_id: this.userId,
type: this.type,
mode: this.mode,
page_no: this.pageNo
}).then((result) => {
setTimeout(() => {
if (result.length === 0) {
// console.log(result.length)
this.noDataAvailable = true;
} else {
this.sessionData = result
// console.log("hello 2", result);
this.noDataAvailable = false;
}
}, 4000);
});
      // sort() mutates sessionData in place, keeping the newest sessions first
      this.sessionData.sort(function(a, b) {
        return new Date(b.updatedTime) - new Date(a.updatedTime);
      });
},
    bottom(bottom) {
      if (bottom) {
        // console.log( this.sessionListArrayForCheck.length)
        this.addSessionList(this.pageNo++);
      }
    }
},
computed: {
...mapGetters("sessionStore", [
"getSessionDashboardSummery",
"getSessionListById"
])
},
methods: {
...mapActions("sessionStore", [
"fetchSessionDashboardSummeryById",
"fetchSessionListById"
]),
async getType(e) {
this.type = e;
this.pageNo = 1;
this.sessionData = await this.fetchSessionListById({
user_id: this.userId,
type: this.type,
mode: this.mode,
page_no: this.pageNo
});
},
bottomVisible() {
const scrollY = window.scrollY;
const visible = document.documentElement.clientHeight;
const pageHeight = document.documentElement.scrollHeight;
const bottomOfPage = visible + scrollY >= pageHeight;
return bottomOfPage || pageHeight < visible;
},
async addSessionList(pageNo) {
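      // fetch one page of sessions, append the rows, and keep paging
      // while the page bottom stays visible and results keep coming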
let apiInfo;
await this.fetchSessionListById({
user_id: this.userId,
type: this.type,
mode: this.mode,
page_no: this.pageNo
}).then(response => {
this.sessionListArrayForCheck = response;
response.map(session => {
apiInfo = {
createdTime: session.createdTime,
invoiceId: session.invoiceId,
mentorId: session.mentorId,
paymentStatus: session.paymentStatus,
paymentTime: session.paymentTime,
requestReviewedTime: session.requestReviewedTime,
serviceOffered: session.serviceOffered,
industry: session.industry,
mentorRating: session.mentorRating,
sessionEndTime: session.sessionEndTime,
sessionModifiedStartTime: session.sessionModifiedStartTime,
sessionRequestedSpot: session.sessionRequestedSpot,
sessionRequestedTime: session.sessionRequestedTime,
sessionStartTime: session.sessionStartTime,
sessionStatus: session.sessionStatus,
updatedTime: session.updatedTime,
userId: session.userId,
_id: session._id
};
return this.sessionData.push(apiInfo);
});
// console.log("asdfasdf", this.sessionListArrayForCheck);
        if (this.bottomVisible() && this.sessionListArrayForCheck.length != 0) {
          this.addSessionList(this.pageNo++);
        }
});
}
}
};
</script>
<style scoped>
.theme--light.v-text-field--solo > .v-input__control > .v-input__slot {
color: #eee;
}
</style>
<file_sep>/* eslint-disable no-console */
// import PaymentService from "../../../service/session.service";
export const actions = {
//TODO: No services yet.might add in near future
// fetch a mentor's basic data by his id (api no 3)
// async fetchQuizzesByTopic({ commit }, { mentor_id }) {
// try {
// const response = await PaymentService.fetchQuizzesByTopic(mentor_id);
// commit("savePosition", response);
// return response;
// } catch (error) {
// console.log(error);
// }
// },
};
<file_sep><template>
<div style="background-color:#eee" class="mb-4">
<!-- mobile layout -->
<div style="background-color:white;">
<!-- connection search bar-->
<v-layout
row
wrap
style="background-color:#EEE;border-bottom-left-radius:25px;border-bottom-right-radius:25px"
>
<v-flex xs12 mt-3 mx-2 text-xs-center>
<v-text-field
class="roundcombobox combobox"
solo
label="Search Your Connection"
append-icon="search"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<!-- filter connections according to industry/interest selection(autoComplete) -->
<v-flex xs6>
<div>
<v-select
solo
flat
class="px-2 py-2 autoComplete"
v-model="industryValue"
:items="industries"
item-text="name"
item-value="_id"
label="Industry/Interest"
hide-details
hide-selected
></v-select>
</div>
</v-flex>
<!-- filter connections according to price range selection(autoComplete) -->
<v-flex xs6>
<v-select
solo
flat
class="px-2 py-2 autoComplete"
v-model="priceValue"
:items="price"
hide-details
hide-selected
item-text="name"
item-value="priceValue"
label="Price"
></v-select>
<!-- {{priceValue}} -->
</v-flex>
</v-layout>
</div>
<!-- mentor card list layout -->
<v-layout row wrap>
<!-- {{mentors}} -->
<v-flex xs12 v-for="mentor in mentors" :key="mentor._id">
<connectMentorCard :mentor="mentor" :isLoggedIn="isLoggedIn" />
</v-flex>
</v-layout>
<div v-if="mentors.length === 0" style="background-color:white;text-align:center">
<v-progress-circular indeterminate color="#007799" class="bottom" />
</div>
</div>
</template>
<script>
//store
import { mapActions, mapGetters, mapState } from "vuex";
//global
import connectMentorCard from "@/components/global/global.mentor.connect.card";
import mentorData from "@/data/data.mentor.json";
export default {
props: ["isLoggedIn"],
components: {
connectMentorCard
},
data: () => ({
mentorsListArrayForCheck: null,
pageNo: 1,
bottom: false,
industry: [
"Technology Companies",
"Garments Industries",
"Chemicel Industries"
],
price: ["2000-3000", "3000-4000", "4000-5000"],
industryValue: "",
priceValue: "2000-5000",
search: null,
industries: [],
mentors: []
}),
async created() {
this.industries = await this.fetchIndustries().catch(function onError(
error
) {
// console.log(error);
});
window.addEventListener("scroll", () => {
this.bottom = this.bottomVisible();
});
// console.log('hey from watch',this.mentors)
this.addMentorList(this.pageNo);
},
mounted() {
// console.log(this.mentorsBasicDataByIndustriesId)
},
watch: {
async industryValue() {
this.pageNo = 1;
this.mentors = [];
// console.log("from price",this.mentors);
try {
this.mentors = await this.fetchMentorsBasicDataByIndustriesId({
industry_id: this.industryValue,
price_range: this.priceValue,
page_no: this.pageNo
});
} catch (error) {
console.log(error);
}
},
async priceValue() {
this.pageNo = 1;
this.mentors = [];
// console.log("from price",this.mentors);
this.mentors = await this.fetchMentorsBasicDataByIndustriesId({
industry_id: this.industryValue,
price_range: this.priceValue,
page_no: this.pageNo
}).catch(function onError(error) {
console.log(error);
});
},
    bottom(bottom) {
      if (bottom) {
        this.addMentorList(this.pageNo++);
      }
    }
},
computed: {
...mapState("industryStore", {
industryList: state => state.industries,
mentorsByIndustry: state => state.mentorsBasicDataByIndustriesId
})
},
methods: {
noDataAvailable() {},
...mapActions("industryStore", [
"fetchIndustries",
"fetchMentorsBasicDataByIndustriesId"
]),
bottomVisible() {
const scrollY = window.scrollY;
const visible = document.documentElement.clientHeight;
const pageHeight = document.documentElement.scrollHeight;
const bottomOfPage = visible + scrollY >= pageHeight;
return bottomOfPage || pageHeight < visible;
},
async addMentorList(pageNo) {
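      // fetch one page of mentors for the current industry/price filters
      // and keep paging while the page bottom is still visible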
let apiInfo;
// let mentorsListArrayForCheck = [];
await this.fetchMentorsBasicDataByIndustriesId({
industry_id: this.industryValue,
price_range: this.priceValue,
page_no: this.pageNo
}).then(response => {
this.mentorsListArrayForCheck = response;
// console.log(response);
response.map(mentor => {
apiInfo = {
mentoringPlaces: mentor.mentoringPlaces,
bio: mentor.bio,
company: mentor.company,
designation: mentor.designation,
education: mentor.education,
hourlyRate: mentor.hourlyRate,
imageUrl: mentor.imageUrl,
industry: mentor.industry,
mentorRating: mentor.mentorRating,
name: mentor.name,
services: mentor.services,
skills: mentor.skills,
_id: mentor._id
};
return this.mentors.push(apiInfo);
});
// console.log("asdfasdf", this.mentorsListArrayForCheck);
        if (this.bottomVisible() && this.mentorsListArrayForCheck.length != 0) {
          this.addMentorList(this.pageNo++);
        }
});
}
}
};
</script>
<style>
.combobox.v-autocomplete__content.v-menu__content .v-card {
align-items: center;
display: flex;
flex-direction: column;
}
.combobox.v-text-field.v-text-field--solo .v-label {
left: 30% !important;
}
.roundcombobox.v-text-field.v-text-field--solo:not(.v-text-field--solo-flat)
> .v-input__control
> .v-input__slot {
border-radius: 25px;
}
.autoComplete.theme--light.v-text-field--solo
> .v-input__control
> .v-input__slot {
background: #eee;
}
</style>
<file_sep><template>
<v-card flat class="py-5">
<v-card-title>
      <h2>Let's Find a Mentor for Career Guidance</h2>
</v-card-title>
<v-card-title>
<h2>Connect with mentors</h2>
</v-card-title>
<v-card-title>
<h2>Improve your skill</h2>
</v-card-title>
</v-card>
</template>
<script>
export default {};
</script>
<style></style>
<file_sep><template>
<div style="background-color:white;">
<v-layout row wrap>
<v-flex xs7>
<div class="ratingBlock">
<h1>{{averageOfAll}}</h1>
<v-rating
v-model="averageOfAll"
readonly
dense
size="15"
color="#3B4042"
background-color="#3B4042"
></v-rating>
<p>2500 reviews</p>
</div>
</v-flex>
<v-flex xs5 style="margin-top:10px">
<span class="spanFontSize">
<Strong>Experience</Strong>
</span>
<br />
<v-layout row wrap>
<v-flex xs6>
<v-rating
readonly
v-model="allRatingAverage[0].avgExperience"
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
<v-flex xs6>({{allRatingAverage[0].avgExperience}})</v-flex>
</v-layout>
<span class="spanFontSize">
<Strong>Communication</Strong>
</span>
<br />
<v-layout row wrap>
<v-flex xs6>
<v-rating
readonly
v-model="allRatingAverage[0].avgCommunication"
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
<v-flex xs6>({{allRatingAverage[0].avgCommunication}})</v-flex>
</v-layout>
<span class="spanFontSize">
<Strong>Friendlyness</Strong>
</span>
<br />
<v-layout row wrap>
<v-flex xs6>
<v-rating
readonly
v-model="allRatingAverage[0].avgFriendliness "
dense
size="10"
color="#3B4042"
background-color="#3B4042"
></v-rating>
</v-flex>
<!-- {{reviewRating}} -->
<v-flex xs6>({{allRatingAverage[0].avgFriendliness}})</v-flex>
</v-layout>
</v-flex>
</v-layout>
</div>
</template>
<script>
import { mapActions } from "vuex";
export default {
props: ["allRatingAverage"],
data: () => ({
averageOfAll: 0
}),
created() {
    this.averageOfAll =
      (this.allRatingAverage[0].avgCommunication +
        this.allRatingAverage[0].avgExperience +
        this.allRatingAverage[0].avgFriendliness) /
      3;
    // console.log("hello jello", this.allRatingAverage);
},
methods: {
...mapActions("mentorStore", ["fetchAllReviewsAverage"])
}
};
</script>
<style >
.spanFontSize {
font-size: 12px;
}
.ratingBlock {
margin: 10px 50px 20px 50px;
border: 1px solid grey;
padding: 10px;
text-align: center;
border-bottom-left-radius: 25px;
}
</style><file_sep><template>
<div>
<v-card class="elevation-0" style="border:.5px solid #eee">
<div>
<v-layout row wrap px-1 pt-2 pb-1>
<v-flex xs5>
<v-layout row wrap>
<v-flex xs12 px-2>
<p style="font-size:15px">ID: {{session._id.slice(0,8)}}</p>
<v-chip small v-if="session.requestReviewedTime!=null">Accepted</v-chip>
<v-chip
small
v-if="session.requestReviewedTime!=null && session.paymentStatus==='Pending'"
>Due Payment</v-chip>
<v-chip
small
v-if="session.requestReviewedTime!=null && session.paymentStatus==='Active'"
>Connected</v-chip>
</v-flex>
<v-flex xs12 px-2 pt-4 style="font-size:15px">
                <span>{{formatDate(sessionDate)}}</span>
<br>
<span>{{sessionTime}}</span>
<br>
<span>{{session.sessionRequestedSpot}}</span>
</v-flex>
</v-layout>
</v-flex>
<v-flex py-3>
<v-divider vertical></v-divider>
</v-flex>
<v-flex xs6>
<v-layout row wrap>
<v-flex xs12 px-3 style="justify-content:center;display:flex;">
<v-avatar>
<img :src="require('@/assets/user.png')" style="position:left" alt="John">
</v-avatar>
</v-flex>
<v-flex xs12 px-2 style="text-align:center;">
<p>
<Strong
v-if="mode==='User'"
style="color:#007799;font-size:15px;"
>{{session.mentorId.name}}</Strong>
<Strong
v-if="mode==='Mentor'"
style="color:#007799;font-size:15px;"
>{{session.userId.name}}</Strong>
</p>
<div>
<span v-for="(data, i) in session.serviceOffered" :key="i">{{ data.name }},</span>
</div>
<div style="flex-direction: column;
align-items: flex-end;
display: flex;">
<v-btn small flat color="#007799" @click.stop="dialog = true">
<span style="color:#007799">View</span>
</v-btn>
<v-dialog
v-model="dialog"
v-if="sessionType === 'Active' && dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<!-- sessionDetails modal component (active)-->
<activeSessionDetailsModal
:mode="mode"
:session="session"
@sendDialogValue="getDialogValue"
style="margin-bottom:50px;top:0"
/>
</v-dialog>
<v-dialog
v-if="
mode === 'User' && sessionType === 'Pending' && dialog
"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
>
<menteeAccept
:getSessionDetails="session"
@sendDialogValue="getDialogValueForMenteeAccept"
/>
<actionButtonsForMenteeAccept
:session="session"
@dialogValueFromPayConfirm="getDialogValueForMenteeAccept"
/>
</v-dialog>
<v-dialog
v-if="sessionType === 'Complete' && dialog"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
width="500"
>
<!-- completeSessiondetails modal -->
<completeSessionDetailsModal
:session="session"
:mode="mode"
@sendDialogValue="getDialogValueFromCompletedSession"
/>
</v-dialog>
<v-dialog
v-if="
mode === 'Mentor' && sessionType === 'Pending' && dialog
"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
>
<mentorAccept
:session="session"
@sendDialogValue="getDialogValueForMentorAccept"
/>
<actionButtonsForMentorAccept
:session="session"
@reviewedTime="reviewedTime"
@reviewedDate="reviewedDate"
@reviewedSpot="reviewedSpot"
/>
</v-dialog>
</div>
</v-flex>
</v-layout>
</v-flex>
</v-layout>
</div>
</v-card>
</div>
</template>
<script>
import mentorAccept from "@/views/modalView/acceptFromMentorModal";
import actionButtonsForMentorAccept from "@/components/local/acceptFromMentorModal/acceptFromMentorModal.actionButtons";
import activeSessionDetailsModal from "@/views/modalView/activeSessionDetailsModal.vue";
import menteeAccept from "@/views/modalView/acceptFromMenteeModal";
import actionButtonsForMenteeAccept from "@/components/local/acceptFromMenteeModal/acceptFromMenteeModal.actionButtons";
import PageNotReady from "@/components/global/global.PageNotReady";
import completeSessionDetailsModal from "@/views/modalView/completeSessionDetailsModal";
export default {
components: {
activeSessionDetailsModal,
menteeAccept,
completeSessionDetailsModal,
PageNotReady,
actionButtonsForMenteeAccept,
actionButtonsForMentorAccept,
mentorAccept
},
props: ["session", "sessionType", "mode"],
data: () => ({
sessionDate: "",
sessionTime: "",
mentorReviewedTime: "",
mentorReviewedDate: "",
mentorReviewedSpot: "",
dialog: false
}),
computed: {
isMentorAccept() {
return;
}
},
async created() {
    // TODO: get mentor skills
// this.mentorAdditionalData = await this.fetchMentorDetailsDataById({
// mentor_id:this.session.mentorId._id
// });
    // convert the requested (or mentor-modified) start time into a display date & time
    const timeUTC =
      this.session.sessionModifiedStartTime === null
        ? this.session.sessionRequestedTime
        : this.session.sessionModifiedStartTime;
    const year = new Date(timeUTC).getUTCFullYear();
    const month = new Date(timeUTC).getUTCMonth() + 1; // getUTCMonth() is zero-based
    const date = new Date(timeUTC).getUTCDate();
    const hours = new Date(timeUTC).getHours();
    const minutes = new Date(timeUTC).getMinutes();
    this.sessionDate = year + "-" + month + "-" + date;
    this.sessionTime = hours + ":" + minutes;
    // console.log(this.sessionDate ,this.sessionTime )
},
methods: {
    formatDate(date) {
      var formattedDate = new Date(date).toDateString();
      // var formattedDate = new Date(date).toLocaleTimeString()
      return formattedDate;
    },
//getting a value which is sending from activeSessionDetailsModal.vue
getDialogValue(valueFromChild) {
this.dialog = valueFromChild;
},
// TODO: temporary placement. should remove from here
getDialogValueForMenteeAccept(valueFromChild) {
this.dialog = valueFromChild;
},
// TODO: temporary placement. should remove from here
getDialogValueForMentorAccept(valueFromChild) {
this.dialog = valueFromChild;
},
getDialogValueFromCompletedSession(valueFromChild) {
this.dialog = valueFromChild;
},
reviewedTime(e) {
this.mentorReviewedTime = e;
// console.log(this.reviewedTime);
},
reviewedDate(e) {
this.mentorReviewedDate = e;
},
reviewedSpot(e) {
this.mentorReviewedSpot = e;
}
}
};
</script>
<style scoped>
.v-chip {
font-size: 12px;
border-radius: 0px 28px 28px 0px;
}
</style><file_sep><template>
<!-- navbar component used in app.vue -->
<div style="background-color: red;">
<!-- navigation bar -->
<v-toolbar
app
style="background-color:#007790"
dark
dense
v-if="this.$route.path!='/connect-promo'"
>
<div>
<!-- profile image with button -->
<!-- TODO: should hidden in desktop view -->
<!-- ONCLICK: it will open side navigation drawer in mobile view -->
<v-btn color="primary" dark @click.stop="drawer = !drawer" icon>
<v-avatar size="28" color="white">
<v-icon color="#007799">person</v-icon>
</v-avatar>
</v-btn>
</div>
<!-- TODO: this commenting section would be need in future -->
<!-- <v-toolbar-title class="headline text-uppercase ">
<v-menu offset-y>
<template v-slot:activator="{ on }">
<v-btn color="primary" dark v-on="on" icon>
<v-avatar :size="avatarSize" color="grey lighten-4">
<img src="@/assets/my.jpg" alt="avatar">
</v-avatar>
</v-btn>
</template>
<v-list>
<v-list-tile v-for="(item, index) in profileMenuItems" :key="index" @click="">
<v-list-tile-title>{{ item.title }}</v-list-tile-title>
<v-divider></v-divider>
<br>
</v-list-tile>
</v-list>
</v-menu>
</v-toolbar-title>-->
<v-spacer></v-spacer>
<v-toolbar-items>
<!-- <v-btn icon @click="messageDialog===true">
<v-icon>message</v-icon>
</v-btn>-->
<!-- {{localLoggedIn}}
<v-btn v-if="!localLoggedIn" icon @click="localLoggedInFunction">
<v-icon>notifications_none</v-icon>
</v-btn>-->
<!-- navigate to home -->
<!-- <v-btn icon to="/">
<v-icon>home</v-icon>
</v-btn>-->
</v-toolbar-items>
</v-toolbar>
<!-- navigation drawer -->
<v-navigation-drawer v-model="drawer" fixed clipped app>
<!-- side navbar's elements component bringing value to parents using emit-->
<navSidebar @drawer="stopdrawer" :isLoggedIn="isLoggedIn" />
</v-navigation-drawer>
</div>
</template>
<script>
// api
import { UserInfoService } from "@/service/storage.service";
import { mapActions, mapGetters, mapState } from "vuex";
// mobile
import commonUserService from "@/service/commonUser.service.js";
import navSidebar from "@/views/modalView/sideNavBar";
export default {
components: {
navSidebar
},
props: ["isLoggedIn"],
data() {
return {
userId: UserInfoService.getUserID(),
avatarSize: 30,
drawer: null,
dialog_one: false,
dialog_two: false,
localLoggedIn: false,
profileMenuItems: [
{
title: "My Profile",
route: "profile-quizzes"
},
{
title: "Setting",
route: "profile-setting"
},
{
title: "Logout",
route: "login"
}
]
};
},
created() {
// this.fetchUserBasicInfoById({ user_id: this.userId });
// console.log("hunny bunny", this.userId);
// commonUserService.fetchUserBasicInfoById({ user_id: this.userId });
},
watch: {
isLoggedIn() {
// console.log(": ", this.$store.state.authStore);
}
},
computed: {},
methods: {
// ...mapActions("commonUserStore", ["fetchUserBasicInfoById"]),
//close the side drawer
stopdrawer(e) {
this.drawer = e;
}
}
};
</script>
<file_sep><template>
<div>
<v-layout row wrap style="margin-bottom:5px">
<v-flex xs4 px-2>
<div style class="boxStyle">
<span style="color:white">Pending Sessions</span>
<br />
<h2>03</h2>
</div>
</v-flex>
<v-flex xs4 px-2>
<div style class="boxStyle">
<span style="color:white">
Next Session
<br />22 april 2019 at 10am
</span>
</div>
</v-flex>
<v-flex xs4 px-2>
<div style class="boxStyle">
<span style="color:white">My Connections</span>
<br />
<h2>15</h2>
</div>
</v-flex>
</v-layout>
</div>
</template>
<style scoped>
.boxStyle {
background-color: #007799;
min-height: 70px;
width: 110px;
padding: 5px;
border-radius: 5px;
font-size: 11px;
color: white;
overflow: hidden;
}
</style>
<file_sep>import { state } from "./commonUser.states";
import { getters } from "./commonUser.getters";
import { actions } from "./commonUser.actions";
import { mutations } from "./commonUser.mutations";
export const commonUserStore = {
namespaced: true,
state,
getters,
actions,
mutations
};
<file_sep><template>
<div>
Hello hunny bunny i am so funny
</div>
</template>
<file_sep><template>
<div>
<v-content>
<v-container fill-height>
<v-layout justify-center>
<v-flex xs12 sm8 class="pr-3 pl-3">
<FormCard />
</v-flex>
<v-flex xs0 sm4 class="pr-2 pl-2">
<MotivationCard class="hidden-sm-and-down" />
</v-flex>
</v-layout>
</v-container>
</v-content>
</div>
</template>
<script>
import FormCard from "./reg.form";
import MotivationCard from "./reg.motivation";
export default {
components: {
FormCard,
MotivationCard
}
};
</script>
<file_sep>export const state = {
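  // populated by the industry actions; views read it via
  // mapState("industryStore", ...) — e.g. industryList in the Home view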
industries: [],
industriesById: [],
mentorsBasicDataByIndustriesId: [],
careerListOfIndustries: [],
salariesOfIndustry: []
};
<file_sep><template>
<div class="pt-5" style="background-color:white;text-align:center">
<div>
<v-btn small icon>
<v-icon>info</v-icon>
</v-btn>
<span>
You have not started
<br />any conversation yet
</span>
</div>
<h3 class="py-4" style="color:#007799">
Say Hello to <strong><NAME> </strong>
</h3>
<div class="pt-5 px-3">
<v-btn block large class="elevation-0" style="border-radius:25px;">
Start Conversation
</v-btn>
</div>
</div>
</template>
<style></style>
<file_sep>export const mutations = {
saveIndustries: (state, payload) => {
state.industries = payload;
},
saveIndustriesById: (state, payload) => {
state.industriesById = payload;
},
saveMentorsBasicDataByIndustriesId: (state, payload) => {
state.mentorsBasicDataByIndustriesId = payload;
},
saveCareerListOfIndustries: (state, payload) => {
state.careerListOfIndustries = payload;
},
saveSalariesOfIndustry: (state, payload) => {
state.salariesOfIndustry = payload;
}
};
<file_sep><template>
<div class="px-2 py-2" style="background-color:white">
    <h4 style="color:#007799">References</h4>
<v-list three-line>
<template v-for="(item, index) in profileData.references">
<v-list-tile :key="index">
<v-list-tile-content>
<v-list-tile-title v-html="item.name"></v-list-tile-title>
<v-list-tile-sub-title
v-html="item.designation"
></v-list-tile-sub-title>
<v-list-tile-sub-title
v-html="item.company"
></v-list-tile-sub-title>
<v-list-tile-sub-title v-html="item.mobile"></v-list-tile-sub-title>
<v-list-tile-sub-title v-html="item.email"></v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</template>
</v-list>
</div>
</template>
<script>
export default {
props: ["profileData", "isEditable"]
};
</script>
<file_sep><template>
  <div>
<v-img
:src="require('@/assets/user.png')"
position="right center"
class="absoluteImageStyle"
></v-img>
<div style="padding-top:65px;text-align:center;padding-bottom:4px">
<p style="font-size:12px;line-height:1;margin-bottom: 4px;margin-top:5px">
        <strong style="color:#007790">{{ menteeDetailInfo[0].name }}</strong>
<!-- {{menteeDetailInfo}} -->
</p>
<p class="textStyle">{{ menteeDetailInfo[0].designation }}</p>
<p class="textStyle">{{ menteeDetailInfo[0].address }}</p>
</div>
</div>
</template>
<script>
export default {
props: ["menteeDetailInfo"],
async created() {
},
mounted() {},
computed: {},
methods: {}
};
</script>
<style scoped>
.absoluteImageStyle {
position: absolute;
left: 50%;
top: 2%;
margin-left: -55px;
min-width: 110px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
min-height: 100px;
}
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 2px;
}
</style>
<file_sep>import Vue from "vue";
import "./plugins/vuetify";
import App from "./App.vue";
import router from "./router";
import store from "./store/index";
import ApiService from "./service/api.service";
import { TokenService } from "./service/storage.service";
import { library } from "@fortawesome/fontawesome-svg-core";
import {
faBrain,
faMapMarkedAlt,
faCodeBranch
} from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/vue-fontawesome";
import "./registerServiceWorker";
import Vuelidate from "vuelidate";
import VueSwing from "vue-swing";
Vue.component("vue-swing", VueSwing);
Vue.use(Vuelidate);
// font awesome library integrate
library.add(faBrain, faMapMarkedAlt, faCodeBranch);
Vue.component("font-awesome-icon", FontAwesomeIcon);
// Set the base URL of the API
ApiService.init(process.env.VUE_APP_ROOT_API);
// If token exists set header
if (TokenService.getToken()) {
ApiService.setHeader();
  // mount the 401 response interceptor (handles expired tokens)
ApiService.mount401Interceptor();
}
// ApiService.initProgressBarIndicator();
Vue.config.productionTip = false;
new Vue({
router,
store,
render: h => h(App)
}).$mount("#app");
<file_sep>export const getters = {
getMentorBacsicData: state => {
return state.mentorBacsicData;
},
getMentorDetailsData: state => {
return state.mentorDetailsData;
},
getMentorSessionCount: state => {
return state.mentorSessionCount;
},
getAllReviews: state => {
return state.allReviews;
},
getAllRandomMentorData: state => {
return state.allRandomMentorData;
},
getRatingAverage: state => {
return state.ratingAverage;
}
};
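
// Component usage sketch (the "mentorStore" namespace is referenced elsewhere
// in this repo, e.g. the Home view):
//   computed: { ...mapGetters("mentorStore", ["getAllRandomMentorData"]) }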
<file_sep><template>
<div>
<v-card class="elevation-3">
<v-list three-line>
<template v-for="item in items">
<v-list-tile :key="item.title">
<v-list-tile-avatar :color="item.color" dark>
<v-icon dark class="elevation-9">{{ item.icon }}</v-icon>
</v-list-tile-avatar>
<v-list-tile-content>
<v-list-tile-title v-html="item.title"></v-list-tile-title>
<v-list-tile-sub-title
v-html="item.subtitle"
></v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</template>
</v-list>
</v-card>
<v-card class="mt-3">
<v-container>
<h2>Need Help?</h2>
        <p>If you are facing any problem, you can contact us</p>
<v-icon color="red">mobile_screen_share</v-icon>
<span>
<strong class="red--text">01820</strong>
</span>
<br />
<v-icon color="indigo">email</v-icon>
<span>
<strong class="indigo--text"><EMAIL></strong>
</span>
</v-container>
</v-card>
</div>
</template>
<script>
export default {
data: () => ({
items: [
{
color: "indigo",
icon: "notification_important",
title: "300+ Job Profile",
subtitle:
"CareerKi has 300+ job profile.Each Profile has brief discussion about this jobs salary,possiblity etc"
},
{
color: "purple",
icon: "people",
title: "100+ mentors",
subtitle:
"We have 100+ Proffessionals who are ready to suggest you how you become successfull"
},
{
color: "blue darken-3",
icon: "assessment",
title: "Assesment Test",
subtitle:
"<span class='text--primary'><NAME></span> — Do you have Paris recommendations? Have you ever been?"
},
{
color: "light-green darken-2",
icon: "find_replace",
title: "Pcychometric Test",
subtitle:
"<span class='text--primary'><NAME></span> — Do you have Paris recommendations? Have you ever been?"
}
]
})
};
</script>
<style></style>
<file_sep><template>
  <!-- after completion, this tab view shows a summary and lets the user give a rating -->
<div>
<v-tabs :height="40" v-model="active" centered color="white" mb-5>
<v-tabs-slider color="#3B4042"></v-tabs-slider>
<v-tab v-for="tab in tabs" class="tabOption" :key="tab.value">{{tab.text}}</v-tab>
<v-tabs-items touchless >
<v-tab-item v-for="tab in tabs" :key="tab.value" ref="getTabValue">
<v-layout row wrap>
<v-flex v-if="tab.value==='message'">
<lastConversation/>
</v-flex>
<v-flex v-if="tab.value==='rating'">
<ratingQuestions :mode="mode" :session="session"/>
</v-flex>
<v-flex v-if="tab.value==='payment'">
<payment/>
</v-flex>
</v-layout>
</v-tab-item>
</v-tabs-items>
</v-tabs>
</div>
</template>
<script>
import lastConversation from "@/components/local/activeSessionDetailsModal/activeSessionDetailsModal.conversation.vue";
import ratingQuestions from "@/components/local/completeSessionDetailsModal/completeSessionDetailsModal.ratingQuestionsTab.vue";
import payment from "@/components/local/completeSessionDetailsModal/completeSessionDetailsModal.paymentTab.vue";
export default {
props: ["tabNumber","mode","session"],
data: () => ({
active: 0,
tabs: [
{
value: "message",
text: "message"
},
{
value: "rating",
text: "Rating & Review"
},
{
value: "payment",
text: "Payment"
}
]
}),
components: {
lastConversation,
ratingQuestions,
payment
},
watch: {
// set tabNumber to active
tabNumber() {
this.active = this.tabNumber;
},
    // send the selected tab number up to completeSessionDetailsModal
    // so it can hide the rate/call-to-action component
active() {
this.$emit("selectedTabNumber", this.active);
}
}
};
</script>
<style scoped>
.tabOption >>> .v-tabs__item:not(.v-tabs__item--active) {
color: #3b4042;
}
.tabOption >>> .v-tabs__item--active {
color: #007799;
}
</style><file_sep><template>
<v-layout row wrap>
<v-flex sm12 md12 lg6 style="padding:50px">
<v-container>
<v-flex xs4 sm4 md3 text-xs-left>
<v-img
:src="require('@/assets/icons/common/careerKiLogo.png')"
alt="CareerKi Logo"
contain
/>
</v-flex>
<v-flex xs12 sm12 md9 pt-5>
<v-flex xs12 sm12>
<h1 class="promo-header">
CareerKi Connect
<sup>BETA</sup>
</h1>
<p class="promo-para">
CareerKi Connect makes you prepared
<br />for your career
</p>
<p
class="promo-para-second"
>Find Professionals, Get Connected, Increase job opportunities.</p>
<v-btn id="connect-promo-btn" to="/connect" round color="#cf001c" dark large>
Connect Now
<span>
<v-icon right color="white">arrow_right_alt</v-icon>
</span>
</v-btn>
</v-flex>
</v-flex>
</v-container>
</v-flex>
<v-flex sm12 md12 lg6>
<v-img
class="hidden-md-and-down"
:src="require('@/assets/images/mask-promo.png')"
:aspect-ratio="1"
style="top:-2%;height: -webkit-fill-available;"
>
<v-img :src="require('@/assets/images/mob-view-promo.gif')" class="mob-position"></v-img>
</v-img>
<v-img
class="hidden-lg-and-up mob-position"
style="margin-bottom:150px"
:src="require('@/assets/images/mob-view-promo.gif')"
></v-img>
</v-flex>
</v-layout>
</template>
<script>
export default {};
</script>
<style lang="scss" scoped>
.base-margin {
margin-left: 40px;
}
.promo-header {
margin-top: 100px;
font-size: 2.5rem;
line-height: 2.5rem;
}
.promo-para {
margin-top: 20px;
font-size: 1.3rem;
}
sup {
color: gray;
}
@media only screen and (max-width: 700px) and (min-width: 601px) {
.mob-position {
height: 640px;
width: 350px;
}
}
@media only screen and (max-width: 600px) {
.mob-position {
height: 640px;
width: 350px;
left: 3%;
}
}
@media only screen and (min-width: 801px) {
.mob-position {
top: 20%;
left: 40%;
height: 640px;
width: 400px;
}
}
</style><file_sep><template>
<div>
<v-tabs :height="40" centered color="white" mb-5 v-model="selectedTabValue">
<v-tabs-slider color="orange"></v-tabs-slider>
<v-tab
@click="tabClick(tab.value)"
v-for="tab in tabs"
:key="tab.value"
class="tabOption"
>{{ tab.text }}</v-tab>
<v-tab-item v-for="tab in tabs" :key="tab.value">
<v-layout row wrap>
<v-flex v-for="(session, i) in sessionData" :key="i">
<sessionCard :mode="mode" :session="session" :sessionType="tab.value" class="my-1"/>
</v-flex>
</v-layout>
</v-tab-item>
</v-tabs>
</div>
</template>
<script>
import { UserInfoService } from "@/service/storage.service";
import sessionCard from "@/components/local/sessionDashboard/sessionDashboard.sessionCard";
// import data from "@/data/data.session.json";
import { mapActions, mapGetters } from "vuex";
export default {
components: {
sessionCard
},
props: ["sessionData", "mode"],
data: () => ({
sortedSessionList: [],
selectedTabValue:1,
tabs: [
{ value: "Active", text: "Active" },
{ value: "Pending", text: "Pending" },
{ value: "Complete", text: "Complete" }
],
// sessionData: data,
userId: UserInfoService.getUserID()
}),
created() {
    // TODO: userId and session id will be provided by the front end
// var userId = '5cbc5e1fd7422d09ec7ec20e';
// this.fetchActiveSessionListById({ user_id: this.userId });
// this.fetchPendingSessionListById({ user_id: this.userId });
// this.fetchCompleteSessionListById({ user_id: this.userId });
},
mounted() {},
computed: {},
methods: {
tabClick(value) {
this.$emit("setType", value);
// console.log(value);
}
}
};
</script>
<style scoped>
.tabOption >>> .v-tabs__item:not(.v-tabs__item--active) {
color: #007799;
}
.tabOption >>> .v-tabs__item--active {
color: orange;
}
</style>
<file_sep><template>
<div style="background-color:white" class="py-3">
<p class="text-xs-center">
<Strong>Your Top 6 Career Matches</Strong>
</p>
<v-layout row wrap class="pl-4">
<v-flex xs6 v-for="(career, i) in careersMatches" :key="i">
<span>{{ career }}</span>
<v-icon small color="#007799">keyboard_arrow_right</v-icon>
</v-flex>
</v-layout>
</div>
</template>
<script>
export default {
data: () => ({
careersMatches: [
"UI/UX Development",
"Data Science",
"Telecommunication",
"Software Development",
"Apps Development",
"Support Engineer"
]
})
};
</script>
<file_sep><template>
<div>
<v-layout row wrap>
<v-flex xs2>
<v-btn icon>
<v-icon>cancel</v-icon>
</v-btn>
</v-flex>
<v-flex xs10>
<p style="text-align:center;font-size:20px">Career Test</p>
</v-flex>
</v-layout>
</div>
</template>
<file_sep>/* eslint-disable no-console */
import { UserService } from "../../../service/auth.service";
export const actions = {
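  // login resolves with the raw response so callers can branch on success
  // themselves; token persistence and error state live in the mutations.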
async login({ commit }, { username, password }) {
// commit("loginRequest");
try {
const response = await UserService.login(username, password);
console.log("response for wrong password: ", response);
if (response.data.success) {
console.log("calling loginSuccess mutation..");
commit("loginSuccess", response.data.data.token);
} else {
commit("loginError", {
errorCode: 401,
message: "Authentication Failed!"
});
}
// Redirect the user to the page he first tried to visit or to the home view
return response;
} catch (error) {
console.log(error);
commit("loginError", {
errorCode: error.errorCode,
message: error.message
});
return error;
}
},
async register({ commit },{ type, username, password }) {
console.log("from actions");
console.log(username);
console.log(password);
console.log(type);
try {
const response = await UserService.register(type, username, password);
return response;
} catch (e) {
return false;
}
},
logout({ commit }) {
UserService.logout();
commit("logoutSuccess");
// router.push("/auth/login");
},
refreshToken({ commit, state }) {
// If this is the first time the refreshToken has been called, make a request
// otherwise return the same promise to the caller
if (!state.refreshTokenPromise) {
const p = UserService.refreshToken();
commit("refreshTokenPromise", p);
// Wait for the UserService.refreshToken() to resolve. On success set the token and clear promise
// Clear the promise on error as well.
p.then(
response => {
commit("refreshTokenPromise", null);
commit("loginSuccess", response);
},
error => {
commit("refreshTokenPromise", null);
console.log(error);
}
);
}
return state.refreshTokenPromise;
}
};
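
// Sketch (assumption — the exact hook name depends on ApiService): a 401
// response interceptor could dispatch this action and retry the request once
// the token refreshes:
//   store.dispatch("authStore/refreshToken").then(() => retryRequest(original));
// The cached refreshTokenPromise guarantees concurrent 401s share one call.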
<file_sep><template>
<div style=" text-align:center ;background-color:white;padding:10px">
<v-btn-toggle v-model="toggle_one" mandatory>
<v-btn disabled @click="modify = false" flat color="#007790" class="px-5">
<span>Requested</span>
</v-btn>
<v-btn @click="modify = true" flat color="#007790" class="px-5">
<span>Modified</span>
</v-btn>
</v-btn-toggle>
<v-layout row wrap>
<v-flex xs5>
<span>{{
dateConversion(getSessionDetails.sessionRequestedTime)
}}</span>
</v-flex>
<v-flex xs2>
<v-icon small>folder</v-icon>
</v-flex>
<v-flex xs5>
<span>{{
dateConversion(getSessionDetails.sessionModifiedStartTime)
}}</span>
</v-flex>
<v-flex xs5>
<span>{{
timeConversion(getSessionDetails.sessionRequestedTime)
}}</span>
</v-flex>
<v-flex xs2>
<v-icon small>folder</v-icon>
</v-flex>
<v-flex xs5>
<span>{{
timeConversion(getSessionDetails.sessionModifiedStartTime)
}}</span>
</v-flex>
<v-flex xs5>
<span>{{ getSessionDetails.sessionRequestedSpot }}</span>
</v-flex>
<v-flex xs2>
<v-icon small>folder</v-icon>
</v-flex>
<v-flex xs5>
<span>{{ getSessionDetails.sessionRequestedSpot }}</span>
</v-flex>
<v-flex xs5>
<span
v-for="(service, i) in getSessionDetails.serviceOffered"
:key="i"
>{{ service.name }}</span
>
</v-flex>
<v-flex xs2>
<v-icon small>folder</v-icon>
</v-flex>
<v-flex xs5>
<span
v-for="(service, i) in getSessionDetails.serviceOffered"
:key="i"
>{{ service.name }}</span
>
</v-flex>
<v-flex xs5>
<span>{{ getSessionDetails.sessionRequestedSpot }}</span>
</v-flex>
<v-flex xs2>
<v-icon small>folder</v-icon>
</v-flex>
<v-flex xs5>
<span>{{ getSessionDetails.sessionRequestedSpot }}</span>
</v-flex>
</v-layout>
</div>
</template>
<script>
export default {
props: ["getSessionDetails"],
data: () => ({
toggle_one: 0,
avatarSize: 6,
modify: false,
icons: [
{
title: "getSessionDetails.sessionRequestedSpot ",
icon: "folder",
iconClass: "grey lighten-1 grey--text"
},
{
title: "getSessionDetails.sessionStartTime",
icon: "folder",
iconClass: "grey lighten-1 grey--text"
}
],
items: [
{
icon: "folder",
iconClass: "grey lighten-1 grey--text",
title: "10 March 2019",
isEditable: true
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text",
title: "7:30 P.M",
isEditable: true
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text",
title: "Mirpur DOHS, Dhaka",
isEditable: true
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text",
title: "CV Review ,Mock Test",
isEditable: false
},
{
icon: "folder",
iconClass: "grey lighten-1 grey--text",
title: "2000$/Session",
isEditable: false
}
]
}),
async created() {},
mounted() {},
computed: {},
methods: {
    dateConversion(value) {
      const year = new Date(value).getUTCFullYear();
      // getUTCMonth() is zero-based, so add 1 to get the calendar month
      const month = new Date(value).getUTCMonth() + 1;
      const date = new Date(value).getUTCDate();
      return year + "-" + month + "-" + date;
    },
    timeConversion(value) {
      const hours = new Date(value).getHours();
      // pad minutes so 9:05 doesn't render as "9:5"
      const minutes = String(new Date(value).getMinutes()).padStart(2, "0");
      return hours + ":" + minutes;
    }
}
};
</script>
<style scoped>
.v-list__tile__avatar {
min-width: 20px;
}
.v-btn-toggle .v-btn:first-child {
border-top-left-radius: 15px;
border-bottom-left-radius: 15px;
}
.v-btn-toggle .v-btn:last-child {
border-top-right-radius: 15px;
border-bottom-right-radius: 15px;
}
.v-btn-toggle {
border-top-left-radius: 15px;
border-bottom-left-radius: 15px;
border-top-right-radius: 15px;
border-bottom-right-radius: 15px;
}
</style>
<file_sep>/* eslint-disable no-console */
// services related to payments between mentor and mentee
import ApiService from "./api.service";
const paymentService = {
  // TODO: No services yet. Might add some in the near future, e.g.:
  // async fetchQuizzesByTopic(topicId) {
  //   try {
  //     const response = await ApiService.get(
  //       "quizzes?topicId=" + topicId + "&operation=topicQuizzes&type=Public"
  //     );
  //     return response.data.data;
  //   } catch (error) {
  //     // console.log(error.response);
  //   }
  // }
};

export default paymentService;
<file_sep><template>
<v-content style="background-color:white;" class="py-4">
<swiper :options="swiperOption">
      <swiper-slide v-for="(item, i) in items" :key="i">
<div style="text-align:center;color:#007799">
<v-icon size="54">{{ item.icon }}</v-icon>
<h3 class="px-5 mx-5">{{ item.text }}</h3>
</div>
</swiper-slide>
<!-- <div class="swiper-pagination" slot="pagination"></div> -->
</swiper>
</v-content>
</template>
<script>
import "swiper/dist/css/swiper.css";
import { swiper, swiperSlide } from "vue-awesome-swiper";
export default {
components: {
swiper,
swiperSlide
},
data: () => ({
email: "",
password: "",
swiperOption: {
spaceBetween: 30,
centeredSlides: true,
autoplay: {
delay: 2500,
disableOnInteraction: false
},
pagination: {
el: ".swiper-pagination",
clickable: true
}
},
items: [
{
text: "Find a mentor for career guidance",
icon: "attach_file"
},
{
text: "Improve your skill",
icon: "cloud"
},
{
text: "Connect with mentor",
icon: "attach_file"
}
]
})
};
</script>
<style scoped>
.v-carousel__controls {
background: white;
}
.v-carousel__controls >>> .v-btn >>> .v-btn__content {
font-size: 8px !important;
}
</style>
<file_sep>export const mutations = {
setUserID(state, payload) {
state.userID = payload;
},
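  // "loading" is a counter of in-flight requests: increment on start,
  // decrement on settle; the UI can treat loading > 0 as "busy"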
setLoading: (state, payload) => {
state.loading += payload;
},
decrementLoading: (state, payload) => {
state.loading -= payload;
},
getUserInfo(state, payload) {
state.UserInfo = payload;
}
};
<file_sep><template>
<div style="background-color:white;padding:5px">
<h3 style="text-align:center">
      Conversation feature is under construction. The mentor will call you.
</h3>
<!-- <v-layout
row
wrap
v-for="msg in messages"
:key="msg.msgId"
:class="{ reverse : msg.isMentor}"
style=" justify-content: center;
align-items: center; "
>
<v-flex xs2 px-2 py-2>
<v-avatar size="50">
<v-img :src="require('@/assets/my.jpg')"></v-img>
</v-avatar>
</v-flex>
<v-flex xs10 :class="{ mentorMsg : msg.isMentor}">
<span v-html="msg.msgBody.msg"></span>
<br>
<span style="font-size:10px">{{msg.msgBody.date}} {{msg.msgBody.time}}</span>
</v-flex>
<div></div>
</v-layout> -->
<div style="text-align:center">
<!-- <v-icon small>info</v-icon> -->
<!-- <span>
This conversation will be available
<br>for 30 days
</span> -->
</div>
<div style="text-align:center">
<!-- <router-link to="/" tag="p" style="padding-top:10px;color:#007799">View in message</router-link> -->
</div>
</div>
</template>
<script>
import messages from "@/data/data.messages";
export default {
data: () => ({
mentorMsg: {
color: "red"
},
menteeMsg: {
color: "red"
},
messages: messages,
mentee: 232,
mentor: 233
}),
computed: {
isMentor() {
var menteeId = 232;
if (menteeId === this.mentee) {
return true;
}
return false;
}
}
};
</script>
<style>
.mentorMsg {
text-align: right;
}
.reverse {
flex-direction: row-reverse;
}
.menteeMsg {
text-align: left;
}
</style>
<file_sep><template>
<div style="background-color:white">
<v-card>
<div class="startRatingPosition">
<v-rating v-model="rating" size="30" color="#3B4042" background-color="#3B4042"></v-rating>
</div>
<div style="text-align:center;">
<h3>{{card.title}}</h3>
<p>{{card.question}}</p>
<br>
<p>{{index+1}}/3</p>
</div>
<div v-if="showCalculation" style="padding:10px;text-align: center;">
<v-text-field v-model="review" counter="200" placeholder="Describe Your Experience(optional)"></v-text-field>
<v-btn flat color="#007799" @click="showPreview">Preview & Submit</v-btn>
</div>
</v-card>
</div>
</template>
<script>
export default {
props: ["card", "index","visibleCards"],
data: () => ({
review:'',
// reviewText:'',
rating: 0,
slideIndex: 0,
showCalculation: false
}),
  watch: {
    rating() {
      this.card.rating = this.rating;
      this.slideIndex = this.index;
      if (this.slideIndex >= 2) {
        // last card reached: reveal the review field and preview button
        this.showCalculation = true;
      } else {
        this.slideIndex++;
        this.showCalculation = false;
      }
      this.$emit("slideIndex", this.slideIndex);
    }
  },
  methods: {
    showPreview() {
      this.showCalculation = false;
      this.$emit("previewShow", true);
      this.$emit("reviewText", this.review);
    }
}
};
</script><file_sep>export const state = {
userBasicInfoById: [],
userSessionCountById: [],
allMessages: [],
allNotifications: [],
userDetailInfoById: [],
allSkills: [],
education: []
};
<file_sep><template>
<div>
<basicInfo class="mb-1" />
<skillTestProfile class="mb-1" />
<skilllTestBadges class="mb-1"></skilllTestBadges>
<skillTestresult class="mb-1"></skillTestresult>
</div>
</template>
<script>
import skilllTestBadges from "@/components/local/skillTestProfile/skillTestProfile.badges.vue";
import basicInfo from "@/components/local/skillTestProfile/skillTestProfile.basic.vue";
import skillTestProfile from "@/components/local/skillTestProfile/skillTestProfile.skills.vue";
import skillTestresult from "@/components/local/skillTestProfile/skillTestProfile.result.vue";
export default {
components: {
basicInfo,
skillTestProfile,
skilllTestBadges,
skillTestresult
},
data: () => ({
goals: ["Product Development", "Product Manager"]
})
};
</script><file_sep><template>
<div style="background-color:#eee">
<!-- mobile layout -->
<div style="background-color:white;" class="mb-1">
<v-layout row wrap class="modalHeader">
<v-btn icon @click="sendDialogValue()">
<v-icon>arrow_back</v-icon>
</v-btn>
</v-layout>
<basic :basicInfo="getSessionDetails" />
</div>
<div class="mb-1">
<sessionStatus :getSessionDetails="getSessionDetails" />
</div>
<div class="mb-1">
<sessionDetails :getSessionDetails="getSessionDetails" />
</div>
</div>
</template>
<script>
// global
import basic from "@/components/global/global.MentorMenteeBasicInfo.vue";
//local
import sessionStatus from "@/components/local/acceptFromMenteeModal/acceptFromMenteeModal.sessionStatus.vue";
import sessionDetails from "@/components/local/acceptFromMenteeModal/acceptFromMenteeModal.sessionInfo.vue";
import { UserInfoService } from "@/service/storage.service";
// api
import { mapActions, mapGetters } from "vuex";
export default {
props: ["getSessionDetails"],
data: () => ({
userId: UserInfoService.getUserID()
}),
components: {
basic,
sessionStatus,
sessionDetails
},
methods: {
sendDialogValue() {
const dialogValue = false;
this.$emit("sendDialogValue", dialogValue);
}
}
};
</script>
<style>
.modalHeader {
height: 80px;
background-color: #eee;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep><template>
<div class="mb-5">
<span class="ml-4">
<Strong>By:</Strong>
</span>
<v-img
class="white--text"
:src="require('@/assets/comboBoxBack.png')"
width="100%"
height="80px"
>
<v-container fill-height fluid>
<v-list dense two-line>
<v-list-tile avatar>
<v-list-tile-avatar>
<img :src="require('@/assets/demoImageMF.png')" />
</v-list-tile-avatar>
<v-list-tile-content>
<v-list-tile-title>{{ session.mentorId.name }}</v-list-tile-title>
<v-list-tile-sub-title
>{{ session.mentorId.designation }} at
{{ session.mentorId.company }}</v-list-tile-sub-title
>
</v-list-tile-content>
</v-list-tile>
</v-list>
</v-container>
</v-img>
</div>
</template>
<script>
export default {
props: ["session"]
};
</script>
<style scoped>
.theme--light.v-list {
background: transparent;
color: rgba(0, 0, 0, 0.87);
}
</style>
<file_sep><template>
<div class="px-2 py-1" style="text-align:center;background-color:white">
<span v-if="!largText" v-html="bio"></span>
<!-- <span else v-html="bio2"></span> -->
<!-- <v-btn small flat color="#007799" @click="showMore">{{ buttonText }}</v-btn> -->
</div>
</template>
<script>
export default {
props: ["mentorBasic"],
data: () => ({
largText: false,
buttonText: "See More",
isEditable: false,
lengthOfTwoThird: 0,
bioLength: 0
}),
created() {
// (this.bioLength = this.mentorBasic.bio.length),
// (this.lengthOfTwoThird = this.bioLength / 5);
// console.log(this.lengthOfTwoThird )
},
computed: {
bio() {
return this.mentorBasic.bio;
// return this.mentorBasic.bio.slice(1, this.lengthOfTwoThird);
}
// bio2() {
// return this.mentorBasic.bio.slice(this.lengthOfTwoThird);
// }
},
methods: {
showMore() {
this.largText = !this.largText;
if (this.buttonText === "See more") {
this.buttonText = "See Less";
// console.log(this.buttonText);
} else {
this.buttonText = "See more";
// console.log(this.buttonText);
}
}
}
};
</script>
<style></style>
<file_sep><template>
<div style="margin-top:5px; background-color:#eee" v-if="!hide">
<v-layout row wrap px-2>
<v-flex xs12 class="cancelIconLeft">
<v-btn flat icon small>
<v-icon small @click="hide = true">cancel</v-icon>
</v-btn>
</v-flex>
<v-flex>
<v-layout row wrap>
<v-flex xs3>
<v-progress-circular
:rotate="360"
:size="70"
:width="8"
:value="value"
color="#007799"
>{{ value }}</v-progress-circular
>
</v-flex>
<v-flex xs9>
<span
>Lorem ipsum dolor sit amet consectetur, adipisicing elit. Illo
eaque possimus facere delectus arc.</span
>
<div class="cancelIconLeft">
<v-btn small flat color="#007799">Add Skills</v-btn>
</div>
</v-flex>
</v-layout>
</v-flex>
</v-layout>
</div>
</template>
<script>
export default {
props: ["profileData", "isEditable"],
data: () => ({
hide: false,
value: 75
})
};
</script>
<style scoped>
.cancelIconLeft {
display: flex;
align-items: flex-end;
flex-direction: column;
}
</style>
<file_sep><template>
<div>
<v-card>
<v-card-title
class="headline"
style="background-color:#007799;color:white"
>Select Your Group</v-card-title
>
<v-container>
<v-layout row wrap>
<v-flex xs4>
<v-btn
dark
color="#007799"
small
class="roundItem elevation-0"
:disabled="disabledS"
@click="clickS"
>Science</v-btn
>
</v-flex>
<v-flex xs4>
<v-btn
dark
color="#007799"
small
class="roundItem elevation-0"
:disabled="disabledH"
@click="clickH"
>Humanities</v-btn
>
</v-flex>
<v-flex xs4>
<v-btn
dark
color="#007799"
small
class="roundItem elevation-0"
:disabled="disabledC"
@click="clickC"
>Commerce</v-btn
>
</v-flex>
</v-layout>
</v-container>
<v-divider></v-divider>
<v-card-actions>
<v-spacer></v-spacer>
<v-btn color="primary" flat @click="saveResult">Next</v-btn>
</v-card-actions>
</v-card>
</div>
</template>
<script>
export default {
data: () => ({
group: "",
disabledS: false,
disabledH: false,
disabledC: false,
buttonData: [
{
buttonName: "Science",
value: "S"
},
{
buttonName: "Humanities",
value: "H"
},
{
buttonName: "Commerce",
value: "C"
}
]
}),
props: ["result"],
methods: {
saveResult() {
// console.log(this.result);
this.$router.push({
name: "careerTestResult",
params: { results: this.result, group: this.group }
});
},
clickS() {
this.group = "S";
(this.disabledS = true),
(this.disabledH = false),
(this.disabledC = false);
},
clickH() {
this.group = "H";
(this.disabledS = false),
(this.disabledH = true),
(this.disabledC = false);
},
clickC() {
this.group = "C";
(this.disabledS = false),
(this.disabledH = false),
(this.disabledC = true);
}
}
};
</script>
<style scoped>
.roundItem {
border-radius: 15px;
}
</style>
<file_sep><template>
<v-card class="elevation-0" style="text-align:center;">
<div class="py-3">
<span class=" titleText">
<strong>
Get career advice
<br />from industry professionals
</strong>
</span>
<br />
<div class="mt-2">
<v-chip small>Select industry</v-chip>
<v-chip small>Find Mentors</v-chip>
<v-chip small>Get Connected</v-chip>
</div>
</div>
</v-card>
</template>
<script>
export default {};
</script>
<style scoped></style>
<file_sep><template>
<!-- navbar component used in app.vue -->
<div>
<!-- navigation bar -->
<v-toolbar app style="background-color:#007790" dark>
      <!-- TODO: this commented-out section may be needed in the future -->
<v-toolbar-title class="headline text-uppercase">
<!-- <v-menu offset-y>
<template v-slot:activator="{ on }"></template>
<v-btn color="primary" dark v-on="on" icon>
<v-avatar size="28" color="white">
<v-icon color="#007799">person</v-icon>
</v-avatar>
</v-btn>
</template>
<v-list>
<v-list-tile v-for="(item, index) in profileMenuItems" :key="index" >
<v-list-tile-title>{{ item.title }}</v-list-tile-title>
<v-divider></v-divider>
<br />
</v-list-tile>
</v-list>
</v-menu>-->
</v-toolbar-title>
<v-spacer></v-spacer>
<v-toolbar-items>
<!-- navigate to home -->
<v-btn flat to="/connect">
<v-icon>home</v-icon>
</v-btn>
<v-btn @click="navigateTo({ name: 'careerTest' })" small flat value="careerTest">Career Test</v-btn>
<v-btn flat @click="dialogLogin=true" v-if="getIsAuthenticate===false">Login</v-btn>
<v-btn flat to="/userProfile" v-if="getIsAuthenticate===true">My Profile</v-btn>
<v-btn flat @click="logoutUser()" v-if="getIsAuthenticate===true">Logout</v-btn>
</v-toolbar-items>
</v-toolbar>
<v-dialog
v-model="dialogLogin"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<auth
@backToPage="getAuthDialogValue"
@closeModalFromLogin="closeModalFromLogin"
@closeModalFromRegistration="closeModalFromRegistration"
/>
<!-- <login/> -->
</v-dialog>
</div>
</template>
<script>
import auth from "@/views/modalView/auth";
import { mapActions, mapGetters, mapState } from "vuex";
export default {
components: {
auth
},
data: () => ({
dialogLogin: false,
profileMenuItems: [
{
title: "Profile",
route: "profile-quizzes"
},
{
title: "Setting",
route: "profile-setting"
},
{
title: "Logout",
route: "login"
}
]
}),
mounted() {
// console.log('asdfdas',this.$route);
},
computed: {
...mapGetters("authStore", ["getIsAuthenticate"])
},
methods: {
logoutUser() {
this.logout();
},
...mapActions("authStore", ["logout"]),
closeModalFromLogin(val) {
this.dialogLogin = val;
},
closeModalFromRegistration(val) {
this.dialogLogin = val;
},
getAuthDialogValue(val) {
this.dialogLogin = val;
},
//navigate to route
navigateTo(route) {
// console.log("I am route", route.name);
if (this.getIsAuthenticate === false && route.name === "careerTest") {
this.dialogLogin = true;
} else {
this.$router.push(route);
}
}
}
};
</script><file_sep><template>
<div>
<!-- mobile layout -->
<v-layout row wrap class="hidden-md-and-up">
<v-flex xs12 mt-3 mx-2 text-xs-center>
<!-- select -->
<v-select
solo
class="roundcombobox combobox"
v-model="model"
:items="industries"
item-value="_id"
item-text="name"
label="All Interest/Industry"
></v-select>
<!-- {{mentorsByIndustry}} -->
</v-flex>
<v-flex xs12>
<!-- career information(popular job and salary) component -->
<informaticsForMobile />
</v-flex>
<v-flex xs12 pt-2>
<!--user journey steps -->
<steps />
</v-flex>
<v-flex xs12 my-2>
<!-- {{mentors}} -->
<!-- mentor card slider component slides mentordashboard card left to right -->
<mentorCardSlider :mentors="mentors" />
</v-flex>
</v-layout>
<!-- desktop layout -->
<v-flex md12 px-1 class="hidden-sm-and-down">
<v-layout row wrap>
<v-flex md12 class="comboboxback">
<!-- middle combobox/selector for industry and intereset -->
<v-select
class="combobox my-5 mx-3"
solo
round
v-model="model"
:items="industryList"
item-text="name"
item-value="_id"
label="All Interest/Industry"
></v-select>
</v-flex>
<v-flex md12 px-5>
<v-layout row wrap>
<v-flex md12 lg6 px-2 py-2 v-for="(mentor, i) in mentors" :key="i">
<!-- mentors card for home page -->
<mentorDashboardCard :mentorBasic="mentor" />
</v-flex>
</v-layout>
</v-flex>
</v-layout>
</v-flex>
</div>
</template>
<script>
// api
import { mapActions, mapGetters, mapState } from "vuex";
// TokenService is needed by the isLoggedIn computed below
import { TokenService } from "@/service/storage.service";
// mobile
import informaticsForMobile from "@/components/local/home/mobile/home.informatics.mobile";
import steps from "@/components/local/home/mobile/home.steps";
import mentorCardSlider from "@/components/local/home/mobile/home.mentor.slider";
//global
import mentorDashboardCard from "@/components/global/global.mentor.dashboard.card";
//web
import leftVerticalTab from "@/components/global/web/global.leftside.tab";
import userProfile from "@/components/local/home/web/home.user.profile";
export default {
name: "Home",
components: {
informaticsForMobile,
steps,
mentorCardSlider,
userProfile,
leftVerticalTab,
mentorDashboardCard
},
data() {
return {
color: "grey lighten-3",
// isTabViewValue: false,
height: 300,
tabs: [
{
text: "Connect",
icon: "map-marked-alt"
},
{
text: "Connect",
icon: "code-branch"
},
{
text: "Skill",
icon: "brain"
}
],
industryId: "",
sliderColor: "#007790",
value: null,
verticalText: false,
model: "",
industries: [],
items: [
"Technology Companies",
"Garments Industries",
"Chemicel Industries"
],
mentors: [],
randomMentor: []
};
},
  created() {
    this.industryId = this.model;
    this.pricerange = "";
    if (this.industryId) {
      // an industry is pre-selected: load mentors for it
      this.fetchMentorsBasicDataByIndustriesId({
        industry_id: this.industryId,
        price_range: this.pricerange
      })
        .then(res => {
          this.mentors = res;
        })
        .catch(function onError(error) {
          console.log(error);
        });
    } else {
      // no industry selected yet: show a random set of mentors
      this.fetchRandomMentorData()
        .then(res => {
          this.mentors = res;
        })
        .catch(function onError(error) {
          console.log(error);
        });
    }
    this.fetchIndustries()
      .then(res => {
        this.industries = res;
      })
      .catch(function onError(error) {
        console.log(error);
      });
  },
watch: {
model(el) {
// this.industryId = this.model;
this.pricerange = "";
const response = this.fetchMentorsBasicDataByIndustriesId({
industry_id: el,
price_range: this.pricerange
}).catch(function onError(error) {
console.log(error);
});
response.then(res => {
this.mentors = res;
});
}
},
computed: {
...mapGetters("industryStore", ["getIndustries"]),
...mapState("industryStore", {
industryList: state => state.industries,
mentorsByIndustry: state => state.mentorsBasicDataByIndustriesId
}),
isLoggedIn: function () {
return !!TokenService.getToken()
}
},
methods: {
...mapActions("mentorStore", ["fetchRandomMentorData"]),
...mapActions("industryStore", [
"fetchIndustries",
"fetchMentorsBasicDataByIndustriesId"
])
}
};
</script>
<style>
@import "../../assets/styles/home.css";
.comboboxback {
background-color: #cccccc;
/* Used if the image is unavailable */
height: 150px;
/* You must set a specified height */
background-position: bottom;
/* Center the image */
background-repeat: no-repeat;
/* Do not repeat the image */
background-size: cover;
background-image: url("../../assets/comboBoxBack.png");
}
.centerItem {
justify-content: center;
display: flex;
align-items: center;
}
.combobox.v-autocomplete__content.v-menu__content .v-card {
align-items: center;
display: flex;
flex-direction: column;
}
.combobox.v-text-field.v-text-field--solo .v-label {
left: 30% !important;
}
.roundcombobox.v-text-field.v-text-field--solo:not(.v-text-field--solo-flat)
> .v-input__control
> .v-input__slot {
border-radius: 25px;
}
.v-text-field.v-text-field--enclosed .v-text-field__details {
margin-bottom: 0px;
}
</style>
<file_sep><template>
<div style="background-color:white" class="py-5 px-3">
<div v-for="(attribute, i) in psychometricAttribute" :key="i">
<v-expansion-panel
dense
flat
inset
class="expansionStyle"
style="backgorund-color:#eee"
>
<v-expansion-panel-content class="mb-1">
<template v-slot:header>
<v-layout row wrap>
<v-flex xs6>
<h4 style="color:#007799">{{ attribute.title }}</h4>
</v-flex>
<v-flex xs6>
<h4 style="color:#007799">{{ attribute.percent }}</h4>
</v-flex>
</v-layout>
</template>
<v-card class="px-4">
<div>
<h4>Characteristics</h4>
<v-list>
<v-list-tile
v-for="(Characteristic, index) in attribute.Characteristics"
:key="index"
>
<v-list-tile-content>
<span style="font-size:14px">{{ Characteristic }}</span>
</v-list-tile-content>
</v-list-tile>
</v-list>
<!-- {{ attribute.professions.group[0].science[0].jobs}} -->
<div v-if="group === 'S'">
              <p><strong>Career Profile Suggestions</strong></p>
<v-list>
<v-list-tile
v-for="(jobs, index) in attribute.professions.group[0]
.science[0].jobs"
:key="index"
>
<!-- {{attribute.professions.group[0]}} -->
<v-list-tile-content>
<!-- <router-link :to="jobs.link" v-html="jobs.name"></router-link> -->
{{ jobs.subName }}<a :href="jobs.link">{{ jobs.name }}</a>
</v-list-tile-content>
</v-list-tile>
</v-list>
<div style="text-align: center;">
<a
:href="
attribute.professions.group[0].science[0].seeMore[0].link
"
>See more
{{
attribute.professions.group[0].science[0].seeMore[0].text
}}</a
>
</div>
</div>
<div v-if="group === 'H'">
              <p><strong>Career Profile Suggestions</strong></p>
<v-list>
<v-list-tile
v-for="(jobs, index) in attribute.professions.group[0]
.humanities[0].jobs"
:key="index"
>
<v-list-tile-content>
<a :href="jobs.link">{{ jobs.subname }}{{ jobs.name }}</a>
</v-list-tile-content>
</v-list-tile>
</v-list>
<div style="text-align: center;">
<a
:href="
attribute.professions.group[0].humanities[0].seeMore[0]
.link
"
>See more
{{
attribute.professions.group[0].humanities[0].seeMore[0]
.text
}}</a
>
</div>
</div>
<div v-if="group === 'C'">
              <p><strong>Career Profile Suggestions</strong></p>
<v-list>
<v-list-tile
v-for="(jobs, index) in attribute.professions.group[0]
.commerce[0].jobs"
:key="index"
>
<v-list-tile-content>
<a :href="jobs.link">{{ jobs.subname }}{{ jobs.name }}</a>
</v-list-tile-content>
</v-list-tile>
</v-list>
<div style="text-align: center;">
<a
:href="
attribute.professions.group[0].commerce[0].seeMore[0].link
"
>See more
{{
attribute.professions.group[0].commerce[0].seeMore[0].text
}}</a
>
</div>
</div>
</div>
</v-card>
</v-expansion-panel-content>
</v-expansion-panel>
</div>
</div>
</template>
<script>
import psychometricAttribute from "@/data/data.psychoData.json";
export default {
data: () => ({
group: "",
careerSuggession: [],
psychometricAttribute: psychometricAttribute
}),
mounted() {
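    // map the six scores passed via route params onto the attribute panels;
    // the R/I/A/S/E/C keys presumably follow the Holland (RIASEC) dimensions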
// console.log(this.$route.params.group);
this.group = this.$route.params.group;
this.psychometricAttribute[0].percent = this.$route.params.results.R;
this.psychometricAttribute[1].percent = this.$route.params.results.I;
this.psychometricAttribute[2].percent = this.$route.params.results.A;
this.psychometricAttribute[3].percent = this.$route.params.results.S;
this.psychometricAttribute[4].percent = this.$route.params.results.E;
this.psychometricAttribute[5].percent = this.$route.params.results.C;
// console.log(this.$route.params.results)
}
};
</script>
<style scoped>
.expansionStyle >>> .v-expansion-panel__header {
background-color: #eee;
}
.theme--light.v-expansion-panel .v-expansion-panel__container {
border-top: 0px solid rgba(0, 0, 0, 0.12);
background-color: #fff;
color: rgba(0, 0, 0, 0.87);
}
.expansionStyle >>> .theme--light.v-icon {
color: #007799;
}
</style>
<file_sep><template>
<div>
<v-layout row wrap py2 px2>
<v-flex xs2 class="virticallyCenter">
<h4 class="curvBorderLeft">{{ sliderPositionNo + 1 }}/60</h4>
</v-flex>
<v-flex xs8>
<v-slider v-model="sliderPositionNo" :max="queQuantity" step="1" tick-size="1"></v-slider>
</v-flex>
<v-flex xs2 class="virticallyCenter">
<h4 class="curvBorderRight">{{ parseInt(((sliderPositionNo + 1) * 100) / 60, 10) }}%</h4>
</v-flex>
</v-layout>
</div>
</template>
<script>
export default {
props: ["cardNo"],
data: () => ({
queQuantity: 59,
sliderPositionNo: 0
}),
watch: {
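    // two-way sync: cardNo keeps the slider aligned with the active question,
    // and dragging the slider reports the new position back to the parent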
cardNo() {
this.sliderPositionNo = this.cardNo;
},
sliderPositionNo(val) {
this.$emit("sliderPosition", val);
}
},
methods: {}
};
</script>
<style scoped>
.virticallyCenter {
justify-content: center;
display: flex;
flex-direction: column;
}
.sliderBack {
padding: 10px;
background-color: white;
}
.curvBorderLeft {
color: #007799;
text-align: center;
border-top-left-radius: 25px;
border-bottom-left-radius: 25px;
}
.curvBorderRight {
color: #007799;
text-align: center;
border-top-right-radius: 25px;
border-bottom-right-radius: 25px;
}
.v-slider input {
padding: 10px;
background-color: white;
}
</style>
<file_sep><template>
<div>
<v-layout row wrap>
      <h5>Experience information: 1</h5>
<v-flex xs12>
<v-text-field
:label="addComponentLabel[0]"
v-model="firstExDesignation"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[1]"
v-model="firstExSession"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[2]"
v-model="firstExCompany"
required
></v-text-field>
</v-flex>
      <h5>Experience information: 2</h5>
<v-flex xs12>
<v-text-field
:label="addComponentLabel[0]"
v-model="secondExDesignation"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[1]"
v-model="secondExSession"
required
></v-text-field>
<v-text-field
:label="addComponentLabel[2]"
v-model="secondExCompany"
required
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12>
<v-btn dark color="#007799" @click="updateProfile">update</v-btn>
</v-flex>
</v-layout>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapActions } from "vuex";
import { UserInfoService } from "@/service/storage.service";
export default {
props: ["userDetails"],
data: () => ({
userId: UserInfoService.getUserID(),
course: "",
session: "",
university: "",
firstExDesignation: "",
firstExSession: "",
firstExCompany: "",
secondExDesignation: "",
secondExSession: "",
secondExCompany: "",
addComponentLabel: ["Designation", "Session Range", "Company Name"],
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
methods: {
...mapActions("commonUserStore", ["saveUpdateProfile"]),
async updateProfile() {
      // field names follow the first entry's schema; values are aligned with
      // the form labels: Designation, Session Range (duration), Company Name
      var experience = [
        {
          designation: this.firstExDesignation,
          duration: this.firstExSession,
          company: this.firstExCompany
        },
        {
          designation: this.secondExDesignation,
          duration: this.secondExSession,
          company: this.secondExCompany
        }
      ];
this.dynamicComponent = true;
// console.log("asdf");
let userData = {
experience: experience
};
// console.log(userData);
// console.log("my userId", this.userId);
try {
const response = await this.saveUpdateProfile({
user_data: userData,
user_id: this.userId
});
// console.log("check my response ", response);
if (!response) {
this.showAlert("Profile Update Failed!", "error", "top");
} else {
this.showAlert("Profile Update Successful!", "success", "top");
var getDialogValue = false;
this.$emit("setDialogValue", getDialogValue);
}
return response;
} catch (error) {
console.log(error);
return null;
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<file_sep><template>
<div style=" text-align:left ;background-color:white;padding:10px">
<span>Skills: </span>
<span v-for="(skill, i) in menteeDetailInfo.skills" :key="i">
{{ skill.name }}</span
>
<br />
<span>Education: {{ menteeDetailInfo.education[0].subject }} </span>
<br />
<span>University: {{ menteeDetailInfo.education[0].university }}</span>
<br />
<span
>Employment Status:
{{ menteeDetailInfo.education[0].employmentStatus }}</span
>
<br />
</div>
</template>
<script>
export default {
props: ["menteeDetailInfo"],
async created() {},
mounted() {},
computed: {},
methods: {}
};
</script>
<file_sep><template>
<div style="background-color:white;padding:10px">
<v-carousel
v-if="!isPreviewShow"
:cycle="false"
:continuous="false"
hide-controls
hide-delimiters
light
v-model="carouselIndex"
>
<v-carousel-item v-for="(card, index) in visibleCards" :key="index">
<queCard
:visibleCards="visibleCards"
:card="card"
:index="carouselIndex"
@slideIndex="slideIndex"
@previewShow="previewShow"
@reviewText="reviewText"
></queCard>
</v-carousel-item>
</v-carousel>
<preview
v-if="isPreviewShow"
@averageRating="average"
:visibleCards="visibleCards"
:session="session"
:review="review"
/>
<div style="text-align:center;padding:10px" v-if="!isSubmitbtnShowing">
<v-btn
large
style="border-radius: 25px;"
class="elevation-0"
@click="submitRating"
>Submit</v-btn>
<br>
<span>
<v-icon>warning</v-icon>
<Strong>
This review will be visible on
          <br>the professional's public profile
</Strong>
</span>
</div>
</div>
</template>
<script>
import { mapActions, mapGetters } from "vuex";
import preview from "@/components/local/completeSessionDetailsModal/completeSessionDetailsModal.questionCard.preview.vue";
import queCard from "./completeSessionDetailsModal.questionCard";
export default {
components: {
queCard,
preview
},
props: ["mode", "session"],
data: () => ({
isSubmitbtnShowing: false,
isPreviewShow: false,
carouselIndex: 0,
rating: 0,
review: "",
averageRating: 0,
visibleCards: [
{
title: "Expertise lavel",
question: "Your Mentor Expertise",
rating: 0
},
{
title: "Communication level",
question: "Your Mentor Communication",
rating: 0
},
{
title: "Friendliness level",
question: "Your Mentor Friendliness",
rating: 0
}
]
}),
methods: {
...mapActions("sessionStore", ["saveCompleteSessionRequest"]),
average(average) {
this.averageRating = average;
},
reviewText(val) {
this.review = val;
// console.log(val);
},
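    // build the review payload for whichever side is rating; both variants
    // share the same three dimensions (experience, communication, friendliness)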
async submitRating() {
// console.log("asdfasdfasd");
this.isSubmitbtnShowing = true;
const updateTime = new Date().toISOString();
// console.log(this.mode);
let reviewRatingFromMentor = {
averageRatingFromMentor: this.averageRating,
sessionRatingFromMentor: {
experience: this.visibleCards[0].rating,
communication: this.visibleCards[1].rating,
friendliness: this.visibleCards[2].rating
},
sessionReviewFromMentor: this.review,
updatedTime: updateTime
};
let reviewRatingFromMentee = {
averageRatingFromUser: this.averageRating,
sessionRatingFromUser: {
experience: this.visibleCards[0].rating,
communication: this.visibleCards[1].rating,
friendliness: this.visibleCards[2].rating
},
sessionReviewFromUser: this.review,
updatedTime: updateTime
};
// passing updateData through action
try {
if (this.mode === "Mentor") {
var response = await this.saveCompleteSessionRequest({
completeSessionRequest: reviewRatingFromMentor,
sessionReviewFromMentor: this.review,
sessionId: this.session._id
});
} else if (this.mode === "User") {
response = await this.saveCompleteSessionRequest({
completeSessionRequest: reviewRatingFromMentee,
sessionId: this.session._id
});
} else {
return null;
}
} catch (error) {
console.log(error);
}
},
previewShow(val) {
// console.log("is PreviewShow", val);
this.isPreviewShow = val;
},
slideIndex(val) {
// console.log("from parent", val);
this.carouselIndex = val;
}
}
};
</script>
<file_sep><template>
<div>
<h3 class="py-3">Update Profile</h3>
<v-layout row wrap>
<h4>Personal Information</h4>
<v-flex xs12 md6>
<v-text-field v-model="userDetails.name" label="Name"></v-text-field>
</v-flex>
<v-flex xs12 md6>
<v-text-field
v-model="userDetails.company"
label="Your Company name"
></v-text-field>
</v-flex>
<v-flex xs12 md6>
<v-text-field
v-model="userDetails.designation"
label="Your Designation"
></v-text-field>
</v-flex>
<v-flex xs12 md6>
<v-text-field
v-model="userDetails.email"
label="Your Email Address"
></v-text-field>
</v-flex>
<v-flex xs12 md6>
<v-text-field
v-model="userDetails.phone"
label="Your Mobile Number"
></v-text-field>
</v-flex>
<v-flex xs12 md6>
<v-text-field
v-model="userDetails.address"
label="Your Address"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12>
<v-textarea
v-model="userDetails.bio"
name="input-7-1"
box
label="Write Somthing about you(More than 120 Caracter)"
auto-grow
></v-textarea>
</v-flex>
<v-flex xs12>
<v-btn color="#007799" dark @click="updateProfile()">Update</v-btn>
</v-flex>
</v-layout>
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</div>
</template>
<script>
import { mapActions } from "vuex";
import { UserInfoService } from "@/service/storage.service";
export default {
props: ["userDetails"],
data: () => ({
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: "",
userId: UserInfoService.getUserID()
}),
methods: {
...mapActions("commonUserStore", ["saveUpdateProfile"]),
async updateProfile() {
this.dynamicComponent = true;
// console.log("asdf");
let userData = {
email: this.userDetails.email,
phone: this.userDetails.phone,
imageUrl: "",
address: this.userDetails.address,
company: this.userDetails.company,
designation: this.userDetails.designation,
bio: this.userDetails.bio,
skills: this.userDetails.skills,
topSkills: this.userDetails.topSkills,
name: this.userDetails.name
};
// console.log(userData);
// console.log("my userId", this.userId);
try {
const response = await this.saveUpdateProfile({
user_data: userData,
user_id: this.userId
});
// console.log("check my response ", response);
if (!response) {
this.showAlert("Profile Update Failed!", "error", "top");
} else {
this.showAlert("Profile Update Successful!", "success", "top");
var getDialogValue = false;
this.$emit("setDialogValue", getDialogValue);
}
return response;
} catch (error) {
console.log(error);
return null;
}
},
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<file_sep>/* eslint-disable no-console */
import CareerTestService from "../../../service/careerTest.service";
export const actions = {
async fetchAllQuestion({ commit }) {
try {
const response = await CareerTestService.fetchAllQuestion();
commit("saveQuestions", response);
return response;
} catch (error) {
console.log(error);
}
},
async savePersonalityTestResult({ commit }, { test_result, user_id }) {
try {
const response = await CareerTestService.savePersonalityTestResult(
test_result,
user_id
);
commit("personalityTestResult", response);
return response;
} catch (error) {
console.log(error);
}
}
};
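
// Usage sketch (assumes this module is mounted under a "careerTestStore"
// namespace, which is not shown in this file):
//   ...mapActions("careerTestStore", ["savePersonalityTestResult"])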
<file_sep>export const getters = {
getQuestions: state => {
return state.questions;
},
getSkillTestResult: state => {
return state.result;
},
getSkillTests: state => {
return state.skillTests;
},
getPopularTests: state => {
return state.popularTests;
},
getSuggestedTest: state => {
return state.suggestedTests;
},
getTestInfo: state => {
return state.test;
},
getTestDetails: state => {
return state.testDetails;
},
getAuthorizers: state => {
return state.authorizers;
}
};
<file_sep><template>
<div style="background-color:white">
<v-layout row wrap style="padding:10px">
<v-flex>
<span>
<Strong>My Skills</Strong>
</span>
<v-btn flat style="color:#007799">
Add
<v-icon>add</v-icon>
</v-btn>
</v-flex>
<v-flex style="text-align:right">
<span><Strong>See All</Strong></span>
</v-flex>
</v-layout>
<div style="padding:10px">
<v-chip v-for="(skill,i) in skills" :key="i">
{{skill}}
</v-chip>
</div>
</div>
</template>
<script>
export default {
data: () => ({
skills: ["Vue", "React", "Java"]
})
};
</script>
<style>
</style><file_sep>export const getters = {
getMenteeReviewOfSession: state => {
return state.menteeReviewOfSession;
},
getAllReviewsFromMentorsAgainstMentee: state => {
return state.allReviewsFromMentorsAgainstMentee;
}
};
<file_sep><template>
<div style="background-color:#eee;margin-bottom:25px">
<!-- mobile layout -->
<v-layout>
<v-flex style=" display: flex;
justify-content: flex-end;">
<v-btn icon small @click.stop="dialog = true">
<v-icon color="#007799" dark>add</v-icon>
</v-btn>
<v-dialog
v-if="dialog"
v-model="dialog"
fullscreen
hide-overlay
transition="slide-x-transition"
style="height: 100%;background-color:#eee"
>
<div v-for="(detail, i) in userDetails" :key="i">
<profileEditModal @setDialogValue="setDialogValue" :userDetails="detail" />
</div>
</v-dialog>
</v-flex>
</v-layout>
<div v-for="(detail, i) in userDetails" :key="i">
<div style="background-color:white;">
<v-layout row wrap class="modalHeader">
<v-flex xs12>
<div class="absoluteImageStyle">
<v-avatar size="120" color="#007799" style="margin-left:18px">
<v-icon size="100" dark>person</v-icon>
</v-avatar>
<!-- <v-avatar size="120" style="margin-left:18px">
<v-icon size="120" >person</v-icon>-->
<!-- <v-img :src="require('@/assets/user.png')" class="imgBorder" position="right center"></v-img> -->
<!-- </v-avatar> -->
</div>
</v-flex>
</v-layout>
<basic :isEditable="isEditable" :profileData="detail" />
</div>
<addSkill :profileData="detail" />
<bio :profileData="detail" class="mb-1" />
<skills :profileData="detail" class="mb-1" />
<exp :profileData="detail" class="mb-1" />
<edu :profileData="detail" class="mb-1" />
<!-- <reference :profileData="detail" class="mb-1"/>
<recomendation :profileData="detail" class="mb-1"/>-->
</div>
</div>
</template>
<script>
import profileEditModal from "@/views/modalView/profileUpdateModal.vue";
import { UserInfoService } from "@/service/storage.service";
import lazyLoadComponent from "@/utils/lazy-load-component.js";
import skeletonBox from "@/components/global/global.skeletonBox.vue";
import basic from "@/components/local/userProfile/local.userProfile.basic.vue";
import addSkill from "@/components/local/userProfile/local.userProfile.addSkill.vue";
import skills from "@/components/local/userProfile/local.userProfile.skills.vue";
import profileData from "@/data/data.profile.json";
import { mapActions, mapGetters, mapState } from "vuex";
export default {
data: () => ({
direction: "top",
dialog: false,
fab: false,
fling: false,
hover: false,
tabs: null,
transition: "slide-y-reverse-transition",
isEditable: false,
userDetails: [],
userId: UserInfoService.getUserID()
}),
async created() {
// console.log(this.userId)
this.userDetails = await this.fetchUserDetailInfoById({
user_id: this.userId
});
// console.log("from main", this.userDetails);
},
computed: {
// ...mapState("commonUserStore",{
// profileData: state => state.userDetailInfoById }
// ),
},
methods: {
...mapActions("commonUserStore", ["fetchUserDetailInfoById"]),
setDialogValue(el) {
this.dialog = el;
}
},
components: {
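    // below-the-fold profile sections are lazy-loaded with a skeleton
    // placeholder to keep the initial render light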
basic,
addSkill,
skills,
profileEditModal,
bio: lazyLoadComponent({
componentFactory: () =>
import("@/components/local/userProfile/local.userProfile.bio.vue"),
loading: skeletonBox
}),
exp: lazyLoadComponent({
componentFactory: () =>
import("@/components/local/userProfile/local.userProfile.expList.vue"),
loading: skeletonBox
}),
edu: lazyLoadComponent({
componentFactory: () =>
import("@/components/local/userProfile/local.userProfile.eduList.vue"),
loading: skeletonBox
}),
reference: lazyLoadComponent({
componentFactory: () =>
import(
"@/components/local/userProfile/local.userProfile.reference.vue"
),
loading: skeletonBox
}),
recomendation: lazyLoadComponent({
componentFactory: () =>
import(
"@/components/local/userProfile/local.userProfile.recomendation.vue"
),
loading: skeletonBox
})
}
};
</script>
<style scoped>
/* #create .v-speed-dial {
position: absolute;
}
#create .v-btn--floating {
position: relative;
} */
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
/* width: 100%; */
}
.imgBorder {
border-radius: 50%;
border: 5px solid white;
}
.absoluteImageStyle {
  position: absolute;
left: 50%;
top: 2%;
margin-left: -75px;
min-width: 110px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
min-height: 100px;
}
.modalHeader {
height: 80px;
background-color: #eee;
border-bottom-left-radius: 25px;
border-bottom-right-radius: 25px;
}
</style>
<file_sep><template>
<div>
<v-card class="elevation-1" :height="350">
<v-layout row wrap px-3>
<v-flex xs12 py-3>
<!-- {{index}} -->
<v-img
:src="require('@/assets/question.png')"
contain
:min-height="130"
:max-height="130"
:min-width="100"
data-srcset="path/logo/logo-large.png 2x"
class="swiper-lazy"
/>
</v-flex>
<v-flex xs12 style="height: 130px">
<h3>Q.{{ (index + 1) }}</h3>
<p style="font-size:17px">{{ (card.question) }}</p>
</v-flex>
<v-flex xs12 style="text-align:center">
<v-btn @click="callIfAnswerNo(card.category, index)" :disabled="clickedNo">No</v-btn>
<v-btn @click="callIfAnswerYes(card.category, index)" :disabled="clickedYes">Yes</v-btn>
</v-flex>
</v-layout>
</v-card>
</div>
</template>
<script>
import "swiper/dist/css/swiper.css";
export default {
props: ["card", "index", "answers", "nav", "swiper"],
data: () => ({
cardChange: false,
questionNumber: 0,
clickedYes: false,
clickedNo: false
}),
mounted() {
// console.log(this.$refs.)
},
methods: {
callIfAnswerYes(category, index) {
// console.log(category);
// this.cardChange=true
// this.swiper();
this.answers.forEach(function(answer) {
// this.$emit("change",this.cardChange)
if (answer.text === category) {
answer.value = answer.value + 1;
index++;
}
});
// console.log(this.answers);
// console.log(this.answers);
this.$emit("rowAnswers", this.answers);
this.questionNumber = index;
// console.log(this.questionNumber)
this.$emit("questionNumber", this.questionNumber);
// console.log(this.answers);
this.clickedYes = true;
this.clickedNo = false;
this.$emit("nextSlide");
},
callIfAnswerNo(category, index) {
this.cardChange = true;
this.$emit("change", this.cardChange);
this.questionNumber = this.questionNumber + 1;
// console.log(this.questionNumber)
var vm = this;
// vm.$emit = ("index", index);
// console.log(this.result);
// console.log(this.answers);
this.clickedNo = true;
this.clickedYes = false;
this.$emit("nextSlide");
}
}
};
</script>
<file_sep><template>
<div>
<v-img :src="require('@/assets/demoImageMF.png')" class="absoluteImageStyle"></v-img>
<div style="padding-top:60px;text-align:center">
<p style="font-size:12px;line-height:1;margin-bottom: 4px;">
        <strong style="color:#007790">{{ session.mentorId.name }}</strong>
</p>
<!-- {{mentorBasic}} -->
<p class="textStyle">{{ session.mentorId.designation }}</p>
<p class="textStyle">{{ session.mentorId.address }}</p>
</div>
</div>
</template>
<script>
export default {
props: ["session"]
};
</script>
<style>
.absoluteImageStyle {
  position: absolute;
left: 50%;
top: 2%;
margin-left: -55px;
min-width: 110px;
border-top-left-radius: 15px;
border-top-right-radius: 15px;
min-height: 100px;
}
.textStyle {
font-size: 12px;
line-height: 1;
margin-bottom: 2px;
}
</style><file_sep><template>
<div style="background-color:white;">
<!-- connection search bar-->
<v-layout
row
wrap
style="background-color:#EEE;border-bottom-left-radius:25px;border-bottom-right-radius:25px"
>
<v-flex xs12 mt-3 mx-2 text-xs-center>
<v-text-field
@click="goToSkillSearchPage"
readonly
class="roundcombobox combobox"
solo
label="Search More Skill Tests"
append-icon="keyboard_arrow_right"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<!-- filter connections according to industry/interest selection(autoComplete) -->
<!-- <v-flex xs6>
<div>
<v-select
solo
class="px-2 py-2 elevation-0"
:items="categoryData"
v-model="selectedCateogry"
item-text="name"
item-value="_id"
label="Category"
hide-details
hide-selected
></v-select>
</div>
</v-flex>-->
<!-- filter connections according to price range selection(autoComplete)-->
<!-- <v-flex xs6>
<v-select
solo
class="px-2 py-2 elevation-0"
hide-details
hide-selected
v-model="selectedTopic"
:items="topicData"
item-text="name"
item-value="_id"
label="Topic"
></v-select>
</v-flex>-->
<div style="padding:15px">
<h3>Popular Topics</h3>
<br />
<v-chip small v-for="(topic,i) in topics" :key="i">{{topic.name}}</v-chip>
</div>
</v-layout>
</div>
</template>
<script>
import { mapGetters, mapActions } from "vuex";
export default {
data() {
return {
topics: []
};
},
created() {
this.fetchAllTopics()
.then(res => {
this.topics = res;
})
.catch(err => {
console.log(err);
});
},
methods: {
...mapActions("skillTestStore", ["fetchAllTopics"]),
goToSkillSearchPage() {
console.log("clicked the search section");
//go to search page of skill test with selected topic
// this.$router.push({ name: "SkillTestSearch" });
}
}
};
</script><file_sep><template>
<v-card
class="elevation-1 mentorCardStyle"
style=" border-radius:5px;background-color:#ECEFF1;padding:1px;
"
flat
>
<v-img
:src="imageUrl(test.image)"
:aspect-ratio="2"
class="imgCardsize"
position=" center right"
>
<v-container fill-height pa-0>
<v-list style=" background-color:transparent;">
<v-list-tile class="listTile">
<v-list-tile-title>
<p
style="color:#007790"
class="font-weight-medium subText text-xs-center"
>{{test.tag}}</p>
</v-list-tile-title>
</v-list-tile>
</v-list>
</v-container>
</v-img>
<v-card-text style="white-space:pre-wrap;background-color:white ">
<h4>{{test.title}}</h4>
<span class="subText">{{test.description}}</span>
<br />
<span class="subText">Publisher :</span>
<span class="subText">
<strong>{{test.publisherId.name}}</strong>
</span>
<br />
<span class="subText">Price :</span>
<span class="subText">
<strong>{{price}}</strong>
</span>
<v-layout row wrap style="color:#007790;">
<v-flex xs1>
<span class="subText">
<strong>{{test.rating}}</strong>
</span>
</v-flex>
<v-flex xs7>
<v-rating
readonly
background-color="black lighten-3"
color="black"
size="15"
v-model="rating"
></v-rating>
</v-flex>
</v-layout>
</v-card-text>
<v-card-actions>
<v-btn
block
class="elevation-0"
@click="dialog=true"
style="border-radius:5px;color:#007799"
color="white"
>Take the test</v-btn>
</v-card-actions>
<v-dialog v-model="dialog" fullscreen hide-overlay transition="dialog-bottom-transition">
<skillTest :test-id='test._id' @closeTestModal="closeTheTest" />
</v-dialog>
</v-card>
</template>
<script>
import skillTest from "@/views/modalView/skillTestModal.vue";
export default {
props: ["test"],
components: {
skillTest
},
data: () => ({
dialog: false,
rating: 0
}),
created() {
this.rating = this.test.rating;
},
  computed: {
    // Assumed: the test document carries a price field; fall back to a label
    // so the template's {{price}} binding still renders when it is absent.
    price() {
      return this.test.price != null ? this.test.price : "Free";
    }
  },
methods: {
closeTheTest() {
this.dialog = false;
},
imageUrl(image) {
if (!image || image === undefined) {
return "https://placehold.it/550x300&text=Test Image";
}
return process.env.VUE_APP_ROOT_API + "/static/" + image;
}
}
};
</script>
<style scoped>
@import "../../../assets/styles/home.css";
.v-card__actions {
padding: 1px;
}
@media screen and (min-width: 1640px) {
.imgCardsize {
max-width: 400px;
min-width: 300px;
}
}
@media screen and (max-width: 1640px) and (min-width: 1055px) {
.imgCardsize {
max-width: 300px;
min-width: 200px;
}
}
@media screen and (max-width: 1055px) {
.imgCardsize {
max-width: 400px;
min-width: 300px;
}
}
.centerItem {
justify-content: center;
display: flex;
align-items: center;
}
.mentorCardStyle {
/* height: 350px */
}
.listTile >>> .v-list__tile {
padding: 0px 0px !important;
/* height: 28px !important; */
}
.v-list__tile__title {
padding: 0px 5px 1px 5px;
padding-left: 5px;
background-color: white;
border-top-right-radius: 10px;
border-bottom-right-radius: 10px;
}
.v-dialog__container {
display: -webkit-box !important;
vertical-align: middle;
}
.v-btn--bottom:not(.v-btn--absolute) {
bottom: 0px;
}
.v-btn--block {
margin-bottom: 0px;
}
.v-btn--icon {
background: transparent;
-webkit-box-shadow: none !important;
box-shadow: none !important;
border-radius: 0%;
-webkit-box-pack: center;
-ms-flex-pack: center;
justify-content: center;
min-width: 0;
width: 100%;
}
</style><file_sep>//handling token to and from localStorage
const TOKEN_KEY = "access_token";
const REFRESH_TOKEN_KEY = "refresh_token";
const USER_ID = "user_id";
const USER_NAME = "user_name";
/**
 * Manages how access tokens are stored and retrieved from storage.
 *
 * The current implementation stores to localStorage. Local Storage should
 * always be accessed through this instance.
**/
const TokenService = {
getToken() {
return localStorage.getItem(TOKEN_KEY);
},
saveToken(accessToken) {
localStorage.setItem(TOKEN_KEY, accessToken);
},
removeToken() {
localStorage.removeItem(TOKEN_KEY);
},
getRefreshToken() {
return localStorage.getItem(REFRESH_TOKEN_KEY);
},
saveRefreshToken(refreshToken) {
localStorage.setItem(REFRESH_TOKEN_KEY, refreshToken);
},
removeRefreshToken() {
localStorage.removeItem(REFRESH_TOKEN_KEY);
}
};
const UserInfoService = {
getUserID() {
return localStorage.getItem(USER_ID);
},
saveUserID(userID) {
localStorage.setItem(USER_ID, userID);
},
removeUserID() {
localStorage.removeItem(USER_ID);
},
getUserName() {
    return localStorage.getItem(USER_NAME);
},
saveUserName(userName) {
localStorage.setItem(USER_NAME, userName);
},
removeUserName() {
localStorage.removeItem(USER_NAME);
}
};
export { TokenService, UserInfoService };
<file_sep><template>
<div>
<v-layout row wrap style="text-align:center;color:white">
<!-- <v-flex xs12>
<h3>
Hello !
<br />Sign Up to get Started
</h3>
</v-flex>-->
<v-flex xs12 px-5 pt-3>
<v-layout row wrap>
<v-flex xs12 offset-md3 md6>
<v-text-field
v-model="username"
label="Username"
placeholder="Enter your username here"
color="white"
dark
readonly
></v-text-field>
</v-flex>
<v-flex>
<v-layout row wrap>
<v-flex xs12 offset-md3 md6>
<v-text-field
required
@input="$v.password.$touch()"
@blur="$v.password.$touch()"
:error-messages="passwordErrors"
dark
:append-icon="show2 ? 'visibility' : 'visibility_off'"
color="white"
:type="show2 ? 'text' : 'password'"
v-model="password"
label="Password"
hint="At least 6 characters"
placeholder="Enter your password here"
@click:append="show2 = !show2"
></v-text-field>
</v-flex>
</v-layout>
<v-layout row wrap>
<v-flex xs12 text-xs-center>
<v-btn
id="user-register-arrow-btn"
dark
outline
small
@click="handleSubmit()"
>Register</v-btn>
</v-flex>
</v-layout>
</v-flex>
</v-layout>
<!-- <h4 class="py-4">
Allready have an account?
<router-link style="color:white" to="/auth/login">Login</router-link>
</h4>-->
</v-flex>
<!-- <v-flex xs12>
<v-img
class="roadPosition"
:src="require('@/assets/authAbsolutePath.png')"
:max-width="250"
:max-height="250"
></v-img>
</v-flex>-->
<v-snackbar
v-model="snackbar"
:color="color"
:multi-line="mode === 'multi-line'"
:timeout="timeout"
:vertical="mode === 'vertical'"
:top="y === 'top'"
:bottom="y == 'bottom'"
>
{{ snackbartext }}
<v-btn dark flat @click="snackbar = false">Close</v-btn>
</v-snackbar>
</v-layout>
</div>
</template>
<script>
import { mapGetters, mapActions } from "vuex";
import { validationMixin } from "vuelidate";
import { required, minLength } from "vuelidate/lib/validators";
export default {
// Mixins are a flexible way to distribute reusable functionalities for Vue components
/* here validationMixin has method,computed property
like functionalities which will merge with your custom functionalities
*/
props: ["getUsername", "getType"],
mixins: [validationMixin],
  // define validation rules for vuelidate
validations: {
password: {
required,
minLength: minLength(6)
}
},
data: () => ({
show2: false,
type: "",
username: "",
password: "",
snackbar: false,
color: "error",
mode: "",
timeout: 4000,
snackbartext: "",
y: ""
}),
mounted() {
this.username = this.getUsername;
this.type = this.getType;
},
created() {},
computed: {
...mapGetters("authStore", [
"authenticating",
"authenticationError",
"authenticationErrorCode"
]),
passwordErrors() {
const errors = [];
if (!this.$v.password.$dirty) return errors;
!this.$v.password.minLength &&
errors.push("Password should be minimum 6 characters long");
!this.$v.password.required && errors.push("Password is required.");
return errors;
}
},
methods: {
...mapActions("authStore", ["register", "login"]),
//submit
async handleSubmit() {
this.$v.$touch();
if (!this.$v.$invalid) {
const response = await this.register({
type: this.type,
username: this.username,
          password: this.password
});
if (!response) {
this.showAlert("Registration Failed! ", "error", "top");
} else {
if (typeof response.data != "undefined") {
if (response.data.success !== true) {
this.showAlert(response.data.message, "error", "top");
} else {
this.showAlert(
"Registration Completed Successfully!",
"success",
"top"
);
//for auto login
this.login({
username: this.username,
            password: this.password
}).then(login => {
if (login) {
this.$router.push(this.$route.query.redirect || "/connect");
const registrationDialog = false;
this.$emit("registrationDialog", registrationDialog);
} else {
this.showAlert("Auto Login Failed!", "error", "top");
}
});
}
} else {
this.showAlert("Registration Failed! ", "error", "top");
}
}
}
},
//snackbar show alert function
showAlert(msg, color, ypos) {
this.color = color;
this.snackbartext = msg;
this.y = ypos;
this.snackbar = true;
}
}
};
</script>
<style scoped>
.input-group--focused > .primary--text {
caret-color: white !important;
color: white !important;
}
.curvyDiv {
/* display: absolute; */
flex: 1;
height: 100%;
/* background-color: # */
/* width:fit-content; */
/* background-image:url('../../../assets/authBackground.png') */
}
</style>
<file_sep><template>
<div class="px-2 py-2" style="background-color:white">
<h4 style="color:#007799">Education</h4>
<v-list three-line>
<template v-for="(item, index) in profileData.education">
<v-list-tile :key="index" avatar>
<v-list-tile-avatar v-if="showMe">
<img :src="item.icon" />
</v-list-tile-avatar>
<v-list-tile-content>
<v-list-tile-title v-html="item.university"></v-list-tile-title>
<v-list-tile-sub-title
v-html="item.subject"
></v-list-tile-sub-title>
<v-list-tile-sub-title
v-html="item.duration"
></v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</template>
</v-list>
</div>
</template>
<script>
export default {
props: ["profileData", "isEditable"],
data: () => ({
showMe: false
})
};
</script>
<file_sep><template>
<div class="px-2 py-2" style="background-color:white">
<h4 style="color:#007799">Experience</h4>
<v-list three-line>
<template v-for="(item, index) in profileData.experience">
<v-list-tile :key="index" avatar>
<v-list-tile-avatar v-if="showMe">
<img :src="item.icon" />
</v-list-tile-avatar>
<v-list-tile-content>
<v-list-tile-title v-html="item.designation"></v-list-tile-title>
<v-list-tile-sub-title
v-html="item.company"
></v-list-tile-sub-title>
<v-list-tile-sub-title
v-html="item.duration"
></v-list-tile-sub-title>
</v-list-tile-content>
</v-list-tile>
</template>
</v-list>
</div>
</template>
<script>
export default {
props: ["profileData", "isEditable"],
data: () => ({
showMe: false
})
};
</script>
<file_sep><template>
<v-card style="text-align:center; background-color:#EEEEEE" class="py-2 elevation-0">
<!-- <v-container style="text-align:center"> -->
<v-layout row wrap py-4>
<v-flex md12 class="centerItem">
<v-img
:src="require('@/assets/user.png')"
alt="avatar"
style="border-radius:50%"
aspect-ratio="1"
:max-width="200"
:max-height="200"
position="top center"
></v-img>
</v-flex>
</v-layout>
<h3 style="color:#007790;" class="font-weight-medium py-3">
<strong>{{userBasic[0].name}}</strong>
</h3>
<v-btn small flat class="my-3" to="/userProfile">View Profile</v-btn>
<v-card flat class="mx-2">
<v-layout>
<v-flex
style="font-size:1.33vh"
md4
v-for="tabinfo in tabInformations"
:key="tabinfo.id"
px-2
>
<span>{{ tabinfo.headline }}</span>
<br />
<span>{{ tabinfo.value }}</span>
</v-flex>
</v-layout>
</v-card>
<v-card></v-card>
</v-card>
</template>
<script>
import { mapActions, mapGetters, mapState } from "vuex";
import { UserInfoService } from "@/service/storage.service";
export default {
data: () => ({
userId: UserInfoService.getUserID(),
userBasic: [],
avatarSize: 200,
tile: false,
tabInformations: [
{
id: 1,
headline: "Complete Sessions",
value: "0"
},
{
id: 2,
headline: "Active Sessions",
value: "0"
},
{
id: 3,
headline: "Pending Sessions",
value: "0"
}
]
}),
async created() {
// const isAuthed = this.isAuthenticate;
// console.log(this.isAuthenticate);
if (this.userId === null) {
return null;
} else {
this.userBasic = await this.fetchUserBasicInfoById({
user_id: this.userId
});
this.name = this.userBasic[0].name;
this.username = this.userBasic[0].userName;
console.log(this.username);
console.log(this.name);
return this.userBasic;
}
// console.log(this.userBasic);
},
methods: {
...mapActions("commonUserStore", ["fetchUserBasicInfoById"]),
getImgUrl(img) {
try {
// console.log("image: ", img);
        if (!img || img === undefined) {
          // Resolve the bundled placeholder through webpack, as the templates do
          return require("@/assets/user.png");
} else {
          return process.env.VUE_APP_ROOT_API + "/static/" + img;
}
} catch (error) {
console.log(error);
}
}
}
};
</script>
<style></style>
<file_sep>export const mutations = {
saveMenteeReviewOfSession: (state, payload) => {
state.menteeReviewOfSession = payload;
},
saveAllReviewsFromMentorsAgainstMentee: (state, payload) => {
state.allReviewsFromMentorsAgainstMentee = payload;
}
};
| 1b9b964324bae7cb62090a90dc15b95717fa95a9 | [
"Vue",
"JavaScript"
] | 151 | Vue | programmerdeb7/CareerKi | 37c128ef4f23d136926a812e8d2aa8ae0da629dd | 285bf780cddae1db7c84d0879abb3f2eea60c29b |
refs/heads/master | <file_sep>function cari_ruteman(s,d) {
var start =node[s];
var dest = node[d];
node[start.id].g=0;
	node[start.id].h=Math.abs(start.x-dest.x)+Math.abs(start.y-dest.y);
node[start.id].f=node[start.id].g+node[start.id].h;
openlist = [start];
closelist=[];
while(openlist.length!=0 && !closelist.includes(dest)) {
while(openlist.length!=0 && !closelist.includes(dest)) {
closelist[closelist.length]=openlist[0];
openlist.splice(0,1);
}
let unique = [...new Set(closelist)];
closelist= unique;
if(!closelist.includes(dest)) {
extendman(dest);
}
if(!closelist.includes(dest) && openlist.length<1) {
extendedman(dest);
}
let test = [...new Set(openlist)];
openlist= test;
}
if(dest.parentid!=null) {
rute = [dest.id];
while(!rute.includes(start.id)) {
rute[rute.length]=node[rute[rute.length-1]].parentid;
}
rute.reverse();
} else {
rute= [];
}
console.log(rute);
}
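// Full (fallback) expansion: opens every unvisited, unblocked neighbour of
// each closed-list node (ids +/-1 step one cell in y, +/-20 step one cell in
// x), recording parentid, g, h and f as it goes. Called only when the greedy
// pass in extendman() left the open list empty.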
function extendedman(dd) {
	for(var cl=closelist.length-1;cl>=0;cl--) {
if(node[closelist[cl].id+1] != null && !closelist.includes(node[closelist[cl].id+1])) {
if(node[closelist[cl].id+1].block==0 && node[closelist[cl].id+1].x==closelist[cl].x && node[closelist[cl].id+1].y==closelist[cl].y+1) {
node[closelist[cl].id+1].parentid=closelist[cl].id;
openlist[openlist.length]=node[closelist[cl].id+1];
node[closelist[cl].id+1].g=closelist[cl].g+1;
node[closelist[cl].id+1].h=Math.abs(node[closelist[cl].id+1].x-dd.x)+Math.abs(node[closelist[cl].id+1].y-dd.y);
node[closelist[cl].id+1].f=node[closelist[cl].id+1].g+node[closelist[cl].id+1].h;
}
}
if(node[closelist[cl].id-1] != null && !closelist.includes(node[closelist[cl].id-1])) {
if(node[closelist[cl].id-1].block==0 && node[closelist[cl].id-1].x==closelist[cl].x && node[closelist[cl].id-1].y==closelist[cl].y-1) {
node[closelist[cl].id-1].parentid=closelist[cl].id;
openlist[openlist.length]=node[closelist[cl].id-1];
node[closelist[cl].id-1].g=closelist[cl].g+1;
node[closelist[cl].id-1].h=Math.abs(node[closelist[cl].id-1].x-dd.x)+Math.abs(node[closelist[cl].id-1].y-dd.y);
node[closelist[cl].id-1].f=node[closelist[cl].id-1].g+node[closelist[cl].id-1].h;
}
}
if(node[closelist[cl].id+20] != null && !closelist.includes(node[closelist[cl].id+20])) {
if(node[closelist[cl].id+20].block==0 && node[closelist[cl].id+20].x==closelist[cl].x+1 && node[closelist[cl].id+20].y==closelist[cl].y) {
node[closelist[cl].id+20].parentid=closelist[cl].id;
openlist[openlist.length]=node[closelist[cl].id+20];
node[closelist[cl].id+20].g=closelist[cl].g+1;
node[closelist[cl].id+20].h=Math.abs(node[closelist[cl].id+20].x-dd.x)+Math.abs(node[closelist[cl].id+20].y-dd.y);
node[closelist[cl].id+20].f=node[closelist[cl].id+20].g+node[closelist[cl].id+20].h;
}
}
if(node[closelist[cl].id-20] != null && !closelist.includes(node[closelist[cl].id-20])) {
if(node[closelist[cl].id-20].block==0 && node[closelist[cl].id-20].x==closelist[cl].x-1 && node[closelist[cl].id-20].y==closelist[cl].y) {
node[closelist[cl].id-20].parentid=closelist[cl].id;
openlist[openlist.length]=node[closelist[cl].id-20];
node[closelist[cl].id-20].g=closelist[cl].g+1;
node[closelist[cl].id-20].h=Math.abs(node[closelist[cl].id-20].x-dd.x)+Math.abs(node[closelist[cl].id-20].y-dd.y);
node[closelist[cl].id-20].f=node[closelist[cl].id-20].g+node[closelist[cl].id-20].h;
}
}
}
}
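// Greedy expansion: opens a neighbour only if it strictly improves the
// parent's heuristic (h < parent's h) without worsening its total estimate
// (f <= parent's f). Every branch is gated on openlist.length<1, so the scan
// stops opening candidates as soon as one node has been added.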
function extendman(dd) {
	for(var cl=closelist.length-1;cl>=0;cl--) {
if(openlist.length<1 && node[closelist[cl].id+1] != null && !closelist.includes(node[closelist[cl].id+1])) {
if(node[closelist[cl].id+1].block==0 && node[closelist[cl].id+1].x==closelist[cl].x && node[closelist[cl].id+1].y==closelist[cl].y+1) {
node[closelist[cl].id+1].parentid=closelist[cl].id;
node[closelist[cl].id+1].g=closelist[cl].g+1;
node[closelist[cl].id+1].h=Math.abs(node[closelist[cl].id+1].x-dd.x)+Math.abs(node[closelist[cl].id+1].y-dd.y);
node[closelist[cl].id+1].f=node[closelist[cl].id+1].g+node[closelist[cl].id+1].h;
if (node[closelist[cl].id+1].h<closelist[cl].h && node[closelist[cl].id+1].f<=closelist[cl].f) {
openlist[openlist.length]=node[closelist[cl].id+1];
}
}
}
if(openlist.length<1 && node[closelist[cl].id-1] != null && !closelist.includes(node[closelist[cl].id-1])) {
if(node[closelist[cl].id-1].block==0 && node[closelist[cl].id-1].x==closelist[cl].x &&
node[closelist[cl].id-1].y==closelist[cl].y-1) {
node[closelist[cl].id-1].parentid=closelist[cl].id;
node[closelist[cl].id-1].g=closelist[cl].g+1;
node[closelist[cl].id-1].h=Math.abs(node[closelist[cl].id-1].x-dd.x)+Math.abs(node[closelist[cl].id-1].y-dd.y);
node[closelist[cl].id-1].f=node[closelist[cl].id-1].g+node[closelist[cl].id-1].h;
if (node[closelist[cl].id-1].h<closelist[cl].h && node[closelist[cl].id-1].f<=closelist[cl].f) {
openlist[openlist.length]=node[closelist[cl].id-1];
}
}
}
if(openlist.length<1 && node[closelist[cl].id+20] != null && !closelist.includes(node[closelist[cl].id+20]) ) {
if(node[closelist[cl].id+20].block==0 && node[closelist[cl].id+20].x==closelist[cl].x+1 &&
node[closelist[cl].id+20].y==closelist[cl].y) {
node[closelist[cl].id+20].parentid=closelist[cl].id;
node[closelist[cl].id+20].g=closelist[cl].g+1;
node[closelist[cl].id+20].h=Math.abs(node[closelist[cl].id+20].x-dd.x)+Math.abs(node[closelist[cl].id+20].y-dd.y);
node[closelist[cl].id+20].f=node[closelist[cl].id+20].g+node[closelist[cl].id+20].h;
if (node[closelist[cl].id+20].h<closelist[cl].h && node[closelist[cl].id+20].f<=closelist[cl].f) {
openlist[openlist.length]=node[closelist[cl].id+20];
}
}
}
if(openlist.length<1 && node[closelist[cl].id-20] != null && !closelist.includes(node[closelist[cl].id-20])) {
if(node[closelist[cl].id-20].block==0 && node[closelist[cl].id-20].x==closelist[cl].x-1 &&
node[closelist[cl].id-20].y==closelist[cl].y) {
node[closelist[cl].id-20].parentid=closelist[cl].id;
node[closelist[cl].id-20].g=closelist[cl].g+1;
node[closelist[cl].id-20].h=Math.abs(node[closelist[cl].id-20].x-dd.x)+Math.abs(node[closelist[cl].id-20].y-dd.y);
node[closelist[cl].id-20].f=node[closelist[cl].id-20].g+node[closelist[cl].id-20].h;
if (node[closelist[cl].id-20].h<closelist[cl].h && node[closelist[cl].id-20].f<=closelist[cl].f) {
openlist[openlist.length]=node[closelist[cl].id-20];
}
}
}
}
} | b90671e888bd88a938ef9e73978f09cac9246fcc | [
"JavaScript"
] | 1 | JavaScript | ARIFCAHYO09/snake-ai | 146f2915294518d53b07deedfe1bb4e1d3039bdf | d731adbdff9f7121d5674c1ea6ca24fcfc7a1c3a |
refs/heads/master | <repo_name>wheelburra/Clique-Server-Side<file_sep>/routes/register.js
// Exports below function to be used in other modules
var fs = require('fs');
module.exports = {
//Attempts to push register to DB
register : function (name, username, password, email, collection, callback) {
// Searches database for existing username match
collection.findOne({ username: username }, function (err, doc) {
if (err) {
console.log("There was a problem searching the database.");
callback({ 'message': err });
}
// Username already exists in the database
if (doc) {
console.log("Username already exists.");
callback("Username already exists.");
} else {
// Writing to the DB, adding a new user document to usercollection
collection.insert({
"name": name,
"username": username,
"password": <PASSWORD>,
"email": email
}, function (err) {
if (err) {
// Writing to the database error
console.log("There was a problem adding the information to the database.");
callback({ 'message': err });
} else {
                    // Creates the user's image subdirectory for user albums
var userPath = ".\\public\\images\\" + username;
fs.mkdirSync(userPath);
// Return profile information in JSON
callback("Welcome " + name + "! Press back arrow to log in.");
}
});
}
});
}
};<file_sep>/routes/users.js
var express = require('express');
var router = express.Router();
var login = require('./login'); //allows login.js exports to be used
var register = require('./register'); //allows register.js exports to be used
var picToApp = require('./picToApp'); //allows picToApp.js exports to be used
var createAlbum = require('./createAlbum'); //allows createAlbum.js exports to be used
var picFromApp = require('./picFromApp'); //allows picFromApp.js exports to be used
var getFriends = require('./getFriends'); //allows getFriends.js exports to be used
var addFriend = require('./addFriend'); //allows addFriend.js exports to be used
var findUser = require('./findUser'); //allows findUser.js exports to be used
//
//Route calls login function for login.js file
router.get('/login', function (req, res) {
//pull parameters from URL
var username = req.param('username');
    var password = req.param('password');
// Set the internal DB variable
var db = req.db;
// Set the user profile collection to a variable
var collection = db.get('userCollection');
//Returns callback from login function
login.login(username, password, collection, function (found) {
res.send(found);
});
});
//Route calls register function for login.js file
router.get('/register', function (req, res) {
//pull parameters from URL
var name = req.param('name');
var username = req.param('username');
    var password = req.param('password');
var email = req.param('email');
// Set the internal DB variable
var db = req.db;
// Set the user profile collection to a variable
var collection = db.get('userCollection');
// Return callback from register function
register.register(name, username, password, email, collection, function (result) {
res.send(result);
});
});
// Sends requested image to the app
router.get('/picToApp', function (req, res) {
var objID = req.param('objID'); //this is hardcoded on app side for now
var coll = req.param('collection');
// Set the internal DB variable
var db = req.db;
// Set the user profile collection to a variable
var collection = db.get(coll);
picToApp.getPic(objID, collection, function (result) {
//res.writeHead(200, {'Content-Type': 'image/jpeg'});
res.send(result);//use send instead of json, sends binary data as buffer
});
});
/*
 * Downloads an image from the phone, stores it in the user's directory, and
 * adds the associated document to the database
* NOT READY YET!!!
*/
router.post('/picFromApp', function (req, res) {
    //hard coded for testing
// var album = 'album1';
// var username = 'skyweezy';
// Get our JSON values.
//var album = req.body.album;
//var username = req.body.username;
    // uncertain which method to use to obtain the album and username values
// pull parameters from URL
var album = req.param('album');
var username = req.param('username');
var readPic = req.files.image.path;
var origName = req.files.image.originalFilename;
// Set the internal DB variable
var db = req.db;
// Set the user album collection to a variable
var albumCollection = db.get(username + album);
picFromApp.uploadPic(username, album, readPic, origName, albumCollection, function (result) {
res.send(result);
});
});
/* GET the album collection. */
router.get('/getCollection', function (req, res) {
//sets the database
var db = req.db;
// pull parameters from URL
var album = req.param('album');
var username = req.param('username');
// finds the collection named usernamealbum
var collection = db.get(username + album);
collection.find({}, {}, function (err, docs) {
if (err) {
console.log('error occured');
res.send({'message': err});
} else {
res.send(docs);
}
});
});
//Route calls createAlbum function from createAlbum.js file
//to create the album directory in the user's image subdirectory
// Should this be a post??
router.get('/createAlbum', function (req, res) {
// Pulls parameters from URL
var album = req.param('name');
var username = req.param('username');
var collection = req.param('collection');
// Set the internal DB variable
var db = req.db;
var master = username + "Albums";
// Set the user master album collection to a variable
var masterAlbumCollection = db.get(master);
// Return callback from createAlbum function
createAlbum.createAlbum(username, album, collection, masterAlbumCollection, function (result) {
res.send(result);
});
});
// Requests the Friends List
router.get('/getFriends', function (req, res) {
// Pulls parameters from URL
var username = req.param('username');
var friends = username + "friendsList";
// Set the internal DB variable
var db = req.db;
// Set the user Friends List collection to a variable
var collection = db.get(friends);
getFriends.getFriends(collection, function (result) {
res.send(result);
});
});
// Adds to the Friend List
router.get('/addFriend', function (req, res) {
// Pulls parameters from URL
var username = req.param('username');
var friendName = req.param('friendName');
var friends = username + "friendsList";
// Set the internal DB variable
var db = req.db;
// Set the user Friends List collection to a variable
var collection = db.get(friends);
addFriend.addFriend(friendName, collection, function (result) {
res.send(result);
});
});
// Finds a user to the Friend List
router.get('/findUser', function (req, res) {
// Pulls parameters from URL
var email = req.param('email');
// Set the internal DB variable
var db = req.db;
// Set the userCollection to a variable
var collection = db.get('userCollection');
findUser.findUser(email, collection, function (result) {
res.send(result);
});
});
module.exports = router;
<file_sep>/views/regTest.jade
extends layout
block content
h1= title
form#formRegister(name="register",method="get",action="users/register")
input#inputUserName(type="text", placeholder="username", name="username")
input#inputUserEmail(type="text", placeholder="email", name="email")
input#inputUserEmail(type="text", placeholder="<PASSWORD>", name="<PASSWORD>")
input#inputUserEmail(type="text", placeholder="name", name="name")
button#btnSubmit(type="submit") submit
<file_sep>/views/picTest.jade
//- Tests the Uploading of a Picture
doctype html
html
head
style
include jade-css.css
title Photos Test
body
h1 Photo Upload Area<file_sep>/routes/picToApp.js
/* Retrieves Image related to User */
module.exports = {
getPic: function (objID, collection, callback) {
// finds the one document matching the objectID
collection.findOne({"_id": objID}, function (err, doc) {
if (err) {
console.log("There was a problem searching the database.");
callback({'message': err});
}
// found picture with matching id
if (doc) {
                var fs = require('fs');
var file = doc.path;
// should include doc.(data from db) in below callback for app to use?
// returns binary data but not in a bson object?
//fs.readFile(file, 'base64', function (err, data) {
fs.readFile(file, function (err, data) {
if (err) {
return console.log(err);
}
//doc gives all the related data from db
//callback({ 'document': doc, 'image': data });
callback(data); //Just send data when sending a picture for full view
});
}
});
}
};
<file_sep>/routes/getFriends.js
/* Retrieves Friends List */
module.exports = {
getFriends: function (collection, callback) {
// returns the list of friends
collection.find({}, {}, function (err, result) {
if (err) {
console.log("Error Searching Document");
callback({'message': err});
} else {
console.log("Returned Entire Friends List");
callback(result);
}
});
}
};<file_sep>/routes/index.js
var express = require('express');
var router = express.Router();
/* GET home page. */
router.get('/', function (req, res, next) {
res.render('index', {title: 'Clique'});
});
/* GET userlist page. Displays the userCollection for testing purposes. */
router.get('/userlist', function (req, res) {
var db = req.db;
var collection = db.get('userCollection');
collection.find({}, {}, function (e, docs) {
res.render('userlist', {
"userlist": docs
});
});
});
// Same as the above test, but returns raw JSON data
router.get('/collection', function (req, res) {
var coll = req.param('collection');
var db = req.db;
var collection = db.get(coll);
collection.find({}, {}, function (e, docs) {
res.json(docs); //send JSON data!
});
});
//Testing URL parameter passing and returning, no DB involved
router.get('/urltest', function (req, res) {
var id = req.param('id');
var token = req.param('token');
res.send(id + ' ' + token);
});
// GET regTest.jade the REGISTRATION TEST PAGE.
router.get('/regTest', function (req, res) {
res.render('regTest', {title: 'Simulates a User Registration to /register'});
});
// GET loginTest.jade the LOGIN TEST PAGE.
router.get('/loginTest', function (req, res) {
res.render('loginTest', {title: 'Simulates a User Login to /login'});
});
module.exports = router;
<file_sep>/routes/findUser.js
// Searches the db for an email match and returns the corresponding username
module.exports = {
findUser: function (email, collection, callback) {
// Searches database for email match
collection.findOne({email: email}, {password: 0}, function (err, doc) {
if (err) {
console.log("There was a problem searching the database.");
callback({'message': err});
}
// Found matching email and returns username
if (doc === null) {
console.log("No match found");
callback("No Match Found");
} else {
console.log("found " + doc.username);
callback(doc.username);
}
});
}
};<file_sep>/README.md
# Clique-Server-Side
Server-Side files for Clique
<file_sep>/routes/createAlbum.js
// Exports below function to be used in other modules
var fs = require('fs');
module.exports = {
    //Attempts to create the album directory and register it in the DB
createAlbum: function (username, album, collection, masterAlbumCollection, callback) {
// Sets album path to variable
var albumPath = ".\\public\\images\\" + username + "\\" + album;
// checks if album subdirectory does not exist
if (!fs.existsSync(albumPath)) {
// Creates the album subdirectory
fs.mkdir(albumPath, function (err) {
if (err) {
return console.error(err);
}
console.log("Directory created successfully!");
// insert adds a new master album collection
masterAlbumCollection.insert({
"name": album,
"collection": username + collection
}, function (err) {
if (err) {
// Writing to the database error
console.log("There was a problem adding the new collection to the database.");
callback({'message': err});
} else {
console.log('Album and collection created');
callback('success');
}
});
});
} else {
// album already exists
console.log("Directory already exists!");
callback('Entry is blank or Album name already exists.');
}
}
};<file_sep>/routes/addFriend.js
/* Adds a username to the Friends List */
module.exports = {
addFriend: function (friendName, collection, callback) {
// Writing to the DB, adding a new user document to usercollection
collection.insert({
"username": friendName
}, function (err) {
if (err) {
// Writing to the database error
console.log("There was a problem adding the information to the database.");
callback({'message': err});
} else {
console.log("Added Friend");
callback(friendName + " was added to your friend list.");
}
});
}
};<file_sep>/routes/login.js
// Exports below function to be used in other modules
module.exports = {
login : function (username, password, collection, callback) { //Searches passed collection for user and returns callback
// Finds a single document in the collection
collection.findOne({ username: username }, function (err, doc) {
if (err) {
console.log("There was a problem retrieving the information to the database.");
callback({ 'message': err });
}
            // Found a username match in database
if (doc) {
                var urlPass = password;
var dbPass = doc.password;
// the password matches!
if (urlPass == dbPass) {
console.log(dbPass + " matches " + urlPass);
// Send user profile in JSON
callback(doc);
}
// The password is incorrect
else {
console.log("Password does not match");
callback("error");
}
}
// No username match found in database
else {
console.log(username + " does not exist");
callback("error");
}
});
}
};<file_sep>/views/index.jade
// index.jade
doctype html
html
head
style
include jade-css.css
body
h1 Clique
div
p Welcome to Clique!
button(onclick="viewList()") View User List
button(onclick="getPictures()") View Images
button(onclick="login()") Login to Clique
button(onclick="register()") Register to Clique
//- <NAME> - Feb. 26th - Simulates a User with Pop-Up Window to Photos
// action will need to point to the route that handles uploads. currently /pictures
div Upload a Photo:
br
form#formUpload(name="upload", method="post", action="/picFromApp")
input#picInput(type="file", placeholder="file", nam="file")
button#btnSubmit(type="submit") submit
script.
function viewList() { window.location.href = '/userlist'; }
function getPictures() { window.location.href = '/picTest'; }
function login() { window.location.href = '/loginTest'; }
function register() { window.location.href = '/regTest'; }
// enctype="multipart/form-data"<file_sep>/nbproject/project.properties
file.reference.node-android-public=public
files.encoding=UTF-8
site.root.folder=${file.reference.node-android-public}
source.folder=
| 0d39465cdb706c2461e2e2e436f75660105521e2 | [
"Markdown",
"JavaScript",
"INI",
"Pug"
] | 14 | Markdown | wheelburra/Clique-Server-Side | 2e9e2be5b025f10cffabf634dfc94e06ede6f90b | 338ec32d7a0ae5a499a88244ddf6cbbad026fd92 |
refs/heads/main | <repo_name>NeilMBennett/EconomicTracker<file_sep>/docs/oi_tracker_data_revisions.src.md
---
geometry: margin=1in
fontsize: 11pt
linestretch: 1
colorlinks: true
numbersections: true
title: |
| Opportunity Insights Economic Tracker
| Data Revisions
subtitle: last updated on 2021-08-18
documentclass: scrartcl
---
<a href="https://raw.githubusercontent.com/OpportunityInsights/EconomicTracker/main/docs/oi_tracker_data_revisions.pdf"><img src="pdf-icon.svg" alt="PDF Download" width="50" style="display:inline;"/> <img src="null.png" alt="Click here to download a PDF version of this document" /></a>
# Overview
This document provides a description of major revisions to the data posted by the Opportunity Insights Economic Tracker. The document is organized sequentially by series in the tracker, among series that have had substantive data revisions since June 30th 2021 due to changes in data processing or data sources over time.
This document is updated regularly and the following information is subject to change.
For further information or if you have any questions please feel free to reach out to [<EMAIL>](mailto:<EMAIL>) and someone on our team will be in touch.
# Data Series
## Employment
**Revisions on June 30th 2021**
The Employment data was revised on June 30th 2021 due to three independent changes in methodology.
- **Revisions to address end-of-year "churn" in Paychex client base:** over time, some firms enter and exit Paychex's client base. This is especially concentrated at the end of each calendar year, where there is significant churn as firms renew their payroll processing contracts. This creates two sources of error in the tracker series. First, due to this seasonal pattern, the raw Paychex data displays a downwards trend in employment at the end of each calendar year as some clients leave Paychex, followed by an upward trend in employment at the very beginning of each calendar year as new clients join. Second, we take steps to avoid firm entry and exit that are more responsive to firm entry and exit at finer levels of geography, so that we can minimize the number of discontinuous changes in employment. The firm entry and exit occurring due to end-of-year "churn" in Paychex's client base resulted in a discrepancy between the national employment series and the corresponding series at the state level.
To avoid these sources of error, we have changed the way in which we process employment data around the end of each calendar year. We now adjust for the end-of-year pattern in the Paychex data using data from the end of 2019. For each date between December 10 2020 and January 10 2021, using Paychex data on employment at the national level, we compute the change in employment relative to December 10 2020 at the two-digit NAICS code x income quartile level. We also compute the change in employment between the corresponding day in the previous year and December 10 2019. We divide the change in employment relative to December 10 2020 by the corresponding change in employment the previous year relative to December 10 2019. At the national level, as of June 2021, this change results in an upwards revision of the employment series by between 1 and 3 percentage points from December 2020 through May 2021. We then apply the same adjustment to each two-digit NAICS code x income quartile cell at the state, county and city levels. For this reason, the state, county and city-level series have also been adjusted upwards by between 1 and 3 percentage points from December 2020 through May 2021.
- **Revisions arising from changes to adjustment for firm entry/exit in Paychex data**: over time, the Paychex sample changes as clients begin to use or stop using Paychex’s payroll processing services. We previously adjusted for firm entry and exit separately at the national and state levels; for details on this adjustment, see Appendix D of Chetty, Friedman, Hendren and Stepner (November 2020). This introduced the possibility of discrepancies between the national-level and the (employment-weighted) mean of the state-level employment series. Empirically, these discrepancies were small throughout most of 2020, but began to grow in December 2020 due to increased churn in Paychex’s client base at the end of 2020, as described above. Since the firm entry/exit adjustment was applied retrospectively, this led to discrepancies throughout the series.
We have changed our approach to adjusting for firm entry/exit to avoid these discrepancies. In each county x industry (two-digit NAICS code) x firm size x income quartile cell, we now compute the change in employment relative to January 4-31 2020, and the change in employment relative to July 1-31 2020. For county x industry x firm size x income quartile cells with over 50 employees at any point between January 2020 and the end of the series, we reduce the weight we place on the series if we observe changes in employment that indicate firm entry or exit. In particular, we compute the weight on the cell for county _c_, industry _i_, firm size _s_, and income quartile _q_ as:
<img src="https://render.githubusercontent.com/render/math?math=\text{Weight}_{c, i, s, q} = \text{max} \Big\{ 1 - \mathbf{1} \{ \text{Min Normed July}_{c, i, s, q} \leq 50 \} \times (50 - \text{Min Normed July}_{c, i, s, q}) \times 0.02 - \mathbf{1} \{ \text{Max Normed January}_{c, i, s, q} \geq 150 \} \times (\text{Max Normed January}_{c, i, s, q} - 150) \times 0.02, 0 \Big\}">
\begin{align*}
\text{Weight}_{c, i, s, q} =& \text{max} \Big\{ 1 - \mathbf{1} \{ \text{Min Normed July}_{c, i, s, q} \leq 50 \} \times (50 - \text{Min Normed July}_{c, i, s, q}) \times 0.02\\
&- \mathbf{1} \{ \text{Max Normed January}_{c, i, s, q} \geq 150 \} \times (\text{Max Normed January}_{c, i, s, q} - 150) \times 0.02, \\
&0 \Big\}
\end{align*}
where Min Normed July<sub>_c, i, s, q_</sub> is the smallest value of indexed employment we observe at each date relative to its mean level over the period July 1-31 2020, and Max Normed January<sub>_c, i, s, q_</sub> is the largest value of indexed employment we observe at each date relative to its mean level over the period January 4-31 2020.
That is, we reduce the weight we place on the cell by two percentage points for each percentage point of growth we observe above 150 percentage points relative to January 2020. We then further reduce the weight we place on each cell by two percentage points of its January 2020 level for each percentage point of decline we observe below 50 percentage points relative to July 2020.
At the national level, this change revises the aggregate employment series between -2 (in October 2020) and +2 (in January 2021) percentage points, as of June 2021. This change also substantially revises the state-level employment series. The mean revision (in absolute value) for aggregate employment at the state level is 5.6 percentage points as of June 2021. This revision is largest in March 2021, when the mean revision (in absolute value) is 9.1 percentage points.
- **Revisions to address changes in minimum wages**: we use hourly wage thresholds when constructing employment by income quartile in the Paychex data. The threshold for the bottom quartile of employment is $13; that is, workers who earn below $13 are assigned to the bottom quartile, whereas workers earning above (or exactly) $13 are allocated to other wage quartiles. On January 1 2021, minimum wage changes came into force in CA, MA, AZ and NY, which caused the minimum wage for some workers to move from below $13 to above $13. This resulted in a decline in employment for workers earning below $13, and a corresponding increase in employment for workers earning above $13, as firms increased workers' wages in response to the minimum wage change. This was reflected in a decrease in low-income employment and an increase in middle-income employment in the tracker data for states in which the minimum wage had increased above $13.
Though the tracker data for these states accurately represented trends in employment among workers earning below $13, the movement of workers around the minimum wage threshold created difficulties when comparing trends in low-wage employment across states. We have taken four steps to address this problem. First, we have created additional series for below-median-income and above-median-income employment, which can be downloaded from this repository. As the national median wage threshold in the Paychex data is $18.18, the below-median-income series is not affected by the shifting of workers induced by the minimum wage change. Second, we have added annotations to the tracker data indicating minimum wage changes in the relevant states. Third, we have suppressed series cut by income quartile in these states after December 1 2020, to avoid displaying series that are substantially affected by minimum wage changes. Finally, the impacts of the minimum wage on low-income employment in these states also affected trends at the national level: as workers' wages increased above the $13 threshold, national employment fell. When computing the national-level trend in employment, we now exclude trends in CA, MA, AZ and NY between December 10 2020 and February 10 2021. We continue to use trends in these states when computing national-level employment from February 11 2021 forward.
After making these changes, the (population-weighted) RMSE (root mean square error) of the state-level employment series relative to the CPS is 4.55 percentage points as of April 2021, after removing public sector and furloughed workers and expressing employment in seasonally-unadjusted terms relative to January 2020. Though we will continue to assess our series relative to the CPS, users should note that some amount of noise remains in both our state-level series estimates and in the CPS estimates. Particularly in instances where these two measures of employment differ, users may consider both the Opportunity Insights series and the CPS as helpful inputs in identifying local patterns.
## Unemployment Claims
**Revisions on July 29th 2021**
The unemployment data was revised on July 29th 2021 to correct a data processing error. Previously we assigned continued PEUC and PUA claims to the end of week date indicated by the Report Date rather than the Reflect Date in the [Department of Labor data](https://oui.doleta.gov/unemploy/docs/weekly_pandemic_claims.xlsx). Effectively, this meant continued PEUC and PUA claims were offset by one week into the future in our data. We've corrected this, and claims now align to the appropriate week.
## COVID-19 Infections
**Revisions on March 17th 2021**
Previously we pulled reported cases and deaths from the New York Times' [COVID-19 database](https://github.com/nytimes/covid-19-data) and reported tests from the [COVID Tracking Project](https://covidtracking.com/). At the conclusion of the COVID Tracking Project's efforts in order to collect testing data we instead began pulling reported cases, deaths, and tests at the county level from the Centers For Disease Control and Prevention's [COVID Data Tracker](https://covid.cdc.gov/covid-data-tracker/#datatracker-home) and aggregated to other geographies.
**Revisions on August 4th 2021**
Previously we pulled reported cases, deaths, and tests at the county level from the Centers For Disease Control and Prevention's COVID Data Tracker and aggregated to other geographies. On July 17th the Centers For Disease Control and Prevention began suppressing reported cases and deaths making aggregations across counties no longer feasible and we began to instead pull reported cases and deaths from the New York Times' [COVID-19 database](https://github.com/nytimes/covid-19-data), state level reported tests from the Johns Hopkins Coronavirus Resource Center's [U.S. testing database](https://github.com/govex/COVID-19/tree/master/data_tables/testing_data), and county level reported tests from [The Centers for Disease Control and Prevention](https://covid.cdc.gov/covid-data-tracker/#datatracker-home).
<file_sep>/docs/oi_tracker_data_dictionary.src.md
---
geometry: margin=1in
fontsize: 11pt
linestretch: 1
colorlinks: true
numbersections: true
title: |
| Opportunity Insights Economic Tracker
| Data Dictionary
subtitle: last updated on 2021-09-14
documentclass: scrartcl
---
<a href="https://raw.githubusercontent.com/OpportunityInsights/EconomicTracker/main/docs/oi_tracker_data_dictionary.pdf"><img src="pdf-icon.svg" alt="PDF Download" width="50" style="display:inline;"/> <img src="null.png" alt="Click here to download a PDF version of this document" /></a>
\renewcommand{\thesubsection}{\arabic{subsection}}
## Overview
Each data source and level of aggregation has a separate CSV, named using the following convention: *Data source* – *Geographic Level of Aggregation* – *Temporal Level of Aggregation*
Additionally, we have three files, **GeoIDs – State** and **GeoIDs – County** and **GeoIDs – City**, that provide information on geographic crosswalks and aggregation. These can be merged to any file sharing the same geographic level of aggregation using the geographic identifier. Additionally, **GeoIDs – County** indicates the commuting zone (CZ) and state that each county belongs to. The City-level data (listed under "Metro" on the tracker site) associates the largest cities in the United States with a representative county one-to-one (except in the case of New York City which includes the 5 boroughs).
Finally, we have gathered a collection of key state-level policy dates relevant for changes in other series trends and values. These are contained in the **Policy Milestones – State** file.
A description of the columns in each file follows.
## GeoID File Descriptions
### GeoIDs - State.csv
Geographic identifier: `statefips`
- `statename`: The name of the state.
- `stateabbrev`: The 2-letter state abbreviation.
- `state_pop2019`: The population of the state in 2019, from Census Bureau estimates.
### GeoIDs - County.csv
Geographic identifier: `countyfips`
- `countyname`: The name of the county.
- `cityid`: The city identifier that the county is assigned to.
- `cityname`: The name of the city that the county is assigned to.
- `cz`: The numeric identifier of the commuting zone (CZ) in which the county is contained.
- `czname`: The name of the commuting zone (CZ) in which the county is contained.
- `statename`: The name of the state in which the county is contained.
- `statefips`: The FIPS code of the state in which the county is contained.
- `stateabbrev`: The 2-letter abbreviation of the state in which the county is contained.
- `county_pop2019`: The population of the county in 2019 according to Census Bureau estimates.
### GeoIDs - City.csv
Geographic identifier: `cityid`
- `cityname`: The name of the city.
- `stateabbrev`: The 2-letter abbreviation of the primary state in which the city is contained.
- `statename`: The name of the primary state in which the city is contained.
- `statefips`: The FIPS code of the primary state in which the city is contained.
- `lat`: Latitude of the city.
- `lon`: Longitude of the city.
- `city_pop2019`: The population of the city in 2019 according to Census Bureau estimates, calculated as population of the county or counties assigned to the city.
## Data File Descriptions
### Affinity
Credit/debit card spending data from [Affinity Solutions](https://www.affinity.solutions).
- `spend_all`: Spending in all merchant category codes (MCCs).
- `spend_all_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_all_incmiddle`: ...by consumers living in ZIP codes with middle (middle two quartiles) median income.
- `spend_aap`: Spending in apparel and accessories (AAP) MCCs.
- `spend_aap_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_acf`: Spending in accommodation and food service (ACF) MCCs.
- `spend_acf_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_aer`: Spending in arts, entertainment, and recreation (AER) MCCs.
- `spend_aer_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_apg`: Spending in general merchandise stores (GEN) and apparel and accessories (AAP) MCCs.
- `spend_apg_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_gen`: Spending in general merchandise stores (GEN) MCCs.
- `spend_gen_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_grf`: Spending in grocery and food store (GRF) MCCs.
- `spend_grf_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_hcs`: Spending in health care and social assistance (HCS) MCCs.
- `spend_hcs_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_hic`: Spending in home improvement centers (HIC) MCCs.
- `spend_hic_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_sgh`: Spending in sporting goods and hobby (SGH) MCCs.
- `spend_sgh_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_tws`: Spending in transportation and warehousing (TWS) MCCs.
- `spend_tws_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_retail_w_grocery`: Spending in retail (AAP, CEC, GEN, GRF, HIC, SGH, ETC) MCCs including grocery spending.
- `spend_retail_w_grocery_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_retail_no_grocery`: Spending in retail (AAP, CEC, GEN, HIC, SGH, ETC) MCCs excluding grocery spending.
- `spend_retail_no_grocery_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_durables`: Spending in durable goods (CEC, CTE, HIC, MOV, SGH, ETC) MCCs.
- `spend_durables_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_nondurables`: Spending in non-durable goods (AAP, AFH, GEN, GRF, HPC, WHT) MCCs.
- `spend_nondurables_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_remoteservices`: Spending in remote services (AWS, CNS, EDS, FAI, INF, PST, PUA, UTL) MCCs.
- `spend_remoteservices_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `spend_inpersonmisc`: Spending in other in-person services (MOS, RLL) MCCs.
- `spend_inpersonmisc_q#`: ...by consumers living in ZIP codes with median income in quartile `#`.
- `provisional`: Indicator to mark that the date is within the most recent three weeks of data and is subject to non-negligible changes as new data is posted.
- `freq`: Marks whether the data represents a daily ("d") or weekly ("w") value.
<!-- List additional variables in comments so they are detected by `verify-csv-columns.py`
- `spend_s_all`:
- `spend_s_all_q#`:
- `spend_s_all_incmiddle`:
- `spend_s_aap`:
- `spend_s_acf`:
- `spend_s_aer`:
- `spend_s_apg`:
- `spend_s_durables`:
- `spend_s_nondurables`:
- `spend_s_remoteservices`:
- `spend_s_inpersonmisc`:
- `spend_s_gen`:
- `spend_s_grf`:
- `spend_s_hcs`:
- `spend_s_hic`:
- `spend_s_sgh`:
- `spend_s_tws`:
- `spend_s_retail_w_grocery`:
- `spend_s_retail_no_grocery`:
- `spend_19_all`:
- `spend_19_all_q#`:
- `spend_19_all_incmiddle`:
- `spend_19_aap`:
- `spend_19_acf`:
- `spend_19_aer`:
- `spend_19_apg`:
- `spend_19_gen`:
- `spend_19_grf`:
- `spend_19_hcs`:
- `spend_19_hic`:
- `spend_19_sgh`:
- `spend_19_tws`:
- `spend_19_inpersonmisc`:
- `spend_19_durables`:
- `spend_19_nondurables`:
- `spend_19_remoteservices`:
- `spend_19_retail_no_grocery`:
- `spend_19_retail_w_grocery`:
-->
All spending variables are measured relative to January 4-31 2020, seasonally adjusted, and calculated as a 7 day moving average. When we subdivide by income using the median income of the ZIP codes, `q1` is the quartile with the lowest median income and `q4` is the quartile with the highest median income. At the national level, we release a variety of breakdowns *without seasonal adjustment* in variables that begin with `spend_s_` (relative to Jan 2020) or `spend_19_` (relative to Jan 2019) instead of `spend_`.
The merchant category codes (MCC) making up the grouped spending categories are:
* **Retail spending:** AAP apparel and accessories, CEC consumer electronics, GEN general merchandise stores, GRF groceries, HIC home improvement centers, SGH sporting goods and hobby, ETC miscellaneous.
* **Durable goods:** CEC consumer electronics, CTE telecommunications equipment, HIC home improvement centers, MOV motor vehicles, SGH sporting goods and hobby, ETC miscellaneous.
* **Non-durable goods:** AAP apparel and accessories, AFH agriculture forestry and hunting, GEN general merchandise stores, GRF groceries, HPC health and personal care stores, WHT wholesale trade.
* **Remote services:** AWS administration and waste services, CNS construction, EDS education, FAI finance and insurance, INF information, PST professional/scientific services, PUA public administration, UTL utilities.
* **Other in-person services:** MOS barber shops and spas, RLL real estate and leasing.
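For a quick start, here is a minimal pandas sketch for loading this series. The file name, the `year`/`month`/`day` date columns, and the use of `.` for missing values are assumptions about the released CSVs, not guarantees:

```python
import pandas as pd

# Load the national daily Affinity series (file name is an assumption).
affinity = pd.read_csv("Affinity - National - Daily.csv", na_values=["."])
# Assemble a date column from the assumed year/month/day fields.
affinity["date"] = pd.to_datetime(affinity[["year", "month", "day"]])
# spend_all is already a 7-day moving average indexed to Jan 4-31 2020,
# so a value of -0.10 means spending 10% below the January 2020 baseline.
print(affinity[["date", "spend_all", "spend_all_q1", "spend_all_q4"]].tail())
```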
### Burning Glass
Job postings data from [Burning Glass Technologies](https://www.burning-glass.com/).
- `bg_posts`: Average level of job postings relative to January 4-31 2020.
- `bg_posts_ss30`: Average level of job postings relative to January 4-31 2020 in manufacturing (NAICS supersector 30).
- `bg_posts_ss55`: Average level of job postings relative to January 4-31 2020 in financial activities (NAICS supersector 55).
- `bg_posts_ss60`: Average level of job postings relative to January 4-31 2020 in professional and business services (NAICS supersector 60).
- `bg_posts_ss65`: Average level of job postings relative to January 4-31 2020 in education and health services (NAICS supersector 65).
- `bg_posts_ss70`: Average level of job postings relative to January 4-31 2020 in leisure and hospitality (NAICS supersector 70).
- `bg_posts_jz1`: Average level of job postings relative to January 4-31 2020 requiring little/no preparation (ONET jobzone level 1).
- `bg_posts_jz2`: Average level of job postings relative to January 4-31 2020 requiring some preparation (ONET jobzone level 2).
- `bg_posts_jz3`: Average level of job postings relative to January 4-31 2020 requiring medium preparation (ONET jobzone level 3).
- `bg_posts_jz4`: Average level of job postings relative to January 4-31 2020 requiring considerable preparation (ONET jobzone level 4).
- `bg_posts_jz5`: Average level of job postings relative to January 4-31 2020 requiring extensive preparation (ONET jobzone level 5).
- `bg_posts_jzgrp12`: Average level of job postings relative to January 4-31 2020 requiring low preparation (ONET jobzone levels 1 and 2).
- `bg_posts_jzgrp345`: Average level of job postings relative to January 4-31 2020 requiring high preparation (ONET jobzone levels 3, 4 and 5).
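A similar sketch contrasts postings for low- and high-preparation jobs; the file name and date columns are again assumptions:

```python
import pandas as pd

bg = pd.read_csv("Burning Glass - National - Weekly.csv", na_values=["."])
bg["date"] = pd.to_datetime(bg[["year", "month", "day"]])
# jzgrp12 bundles ONET job zones 1-2; jzgrp345 bundles zones 3-5.
print(bg[["date", "bg_posts_jzgrp12", "bg_posts_jzgrp345"]].tail())
```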
### COVID
COVID cases, deaths, tests, and vaccination numbers are from the [CDC](https://covid.cdc.gov/covid-data-tracker/#datatracker-home).
- `case_rate`: Confirmed COVID-19 cases per 100,000 people, seven day moving average.
- `case_count`: Confirmed COVID-19 cases, seven day moving average.
- `new_case_rate`: New confirmed COVID-19 cases per 100,000 people, seven day moving average.
- `new_case_count`: New confirmed COVID-19 cases, seven day moving average.
- `death_rate`: Confirmed COVID-19 deaths per 100,000 people, seven day moving average.
- `death_count`: Confirmed COVID-19 deaths, seven day moving average.
- `new_death_rate`: New confirmed COVID-19 deaths per 100,000 people, seven day moving average.
- `new_death_count`: New confirmed COVID-19 deaths, seven day moving average.
- `test_rate`: COVID-19 tests per 100,000 people, seven day moving average.
- `test_count`: COVID-19 tests, seven day moving average.
- `new_test_rate`: New COVID-19 tests per 100,000 people, seven day moving average.
- `new_test_count`: New COVID-19 tests, seven day moving average.
- `vaccine_rate`: First vaccine doses administered per 100 people.
- `vaccine_count`: First vaccine doses administered.
- `new_vaccine_rate`: New first vaccine doses administered per 100 people, seven day moving average.
- `new_vaccine_count`: New first vaccine doses administered, seven day moving average.
- `fullvaccine_rate`: Vaccine series completed per 100 people.
- `fullvaccine_count`: Vaccine series completed.
- `new_fullvaccine_rate`: New vaccine series completed per 100 people, seven day moving average.
- `new_fullvaccine_count`: New vaccine series completed, seven day moving average.
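Each `*_rate` series is its `*_count` counterpart scaled by population, which can be checked directly. In this sketch the county file name, the `countyfips` join key, and the `.` missing-value convention are assumptions:

```python
import pandas as pd

covid = pd.read_csv("COVID - County - Daily.csv", na_values=["."])
geo = pd.read_csv("GeoIDs - County.csv")
merged = covid.merge(geo[["countyfips", "county_pop2019"]], on="countyfips")
# new_case_rate should approximate new cases per 100,000 residents.
recomputed = merged["new_case_count"] / merged["county_pop2019"] * 100_000
print((recomputed - merged["new_case_rate"]).abs().describe())
```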
### Google Mobility
GPS mobility data indexed to Jan 3-Feb 6 2020 from [Google COVID-19 Community Mobility Reports](https://www.google.com/covid19/mobility/).
- `gps_away_from_home`: Time spent outside of residential locations.
- `gps_retail_and_recreation`: Time spent at retail and recreation locations.
- `gps_grocery_and_pharmacy`: Time spent at grocery and pharmacy locations.
- `gps_parks`: Time spent at parks.
- `gps_transit_stations`: Time spent at transit stations.
- `gps_workplaces`: Time spent at work places.
- `gps_residential`: Time spent at residential locations.
### Employment
Employment levels relative to Jan 4-31 2020 from [Paychex](https://www.paychex.com/), [Intuit](https://www.intuit.com/), [Earnin](https://www.earnin.com/) and [Kronos](https://www.kronos.com/).
- `emp`: Employment level for all workers.
- `emp_incq1`: Employment level for workers in the bottom quartile of the income distribution (incomes approximately under $27,000).
- `emp_incq2`: Employment level for workers in the second quartile of the income distribution (incomes approximately $27,000 to $37,000).
- `emp_incmiddle`: Employment level for workers in the middle two quartiles of the income distribution (incomes approximately $27,000 to $60,000).
- `emp_incq3`: Employment level for workers in the third quartile of the income distribution (incomes approximately $37,000 to $60,000).
- `emp_incq4`: Employment level for workers in the top quartile of the income distribution (incomes approximately over $60,000).
- `emp_incbelowmed`: Employment level for workers in the bottom half of the income distribution (incomes approximately under $37,000).
- `emp_incabovemed`: Employment level for workers in the top half of the income distribution (incomes approximately over $37,000).
- `emp_ss40`: Employment level for workers in trade, transportation and utilities (NAICS supersector 40).
- `emp_ss60`: Employment level for workers in professional and business services (NAICS supersector 60).
- `emp_ss65`: Employment level for workers in education and health services (NAICS supersector 65).
- `emp_ss70`: Employment level for workers in leisure and hospitality (NAICS supersector 70).
- `emp_retail`: Employment level for workers in retail (NAICS sector 44-45).
- `emp_retail_inclow`: Employment level for workers in retail (NAICS sector 44-45) and in the bottom quartile of the income distribution (incomes approximately under $27,000).
- `emp_retail_incmiddle`: Employment level for workers in retail (NAICS sector 44-45) and in the middle two quartiles of the income distribution (incomes approximately $27,000 to $60,000).
- `emp_retail_inchigh`: Employment level for workers in retail (NAICS sector 44-45) and in the top quartile of the income distribution (incomes approximately over $60,000).
- `emp_s72`: Employment level for workers in accommodation and food services (NAICS sector 72).
- `emp_subset_unweighted_q1`: Employment level for workers in the bottom quartile of the income distribution (incomes approximately under $27,000) in county x industry (2-digit NAICS code) cells with nonzero employment for all four income quartiles.
- `emp_subset_unweighted_q2`: Employment level for workers in the second quartile of the income distribution (incomes approximately $27,000 to $37,000) in county x industry (2-digit NAICS code) cells with nonzero employment for all four income quartiles.
- `emp_subset_unweighted_q3`: Employment level for workers in the third quartile of the income distribution (incomes approximately $37,000 to $60,000) in county x industry (2-digit NAICS code) cells with nonzero employment for all four income quartiles.
- `emp_subset_unweighted_q4`: Employment level for workers in the top quartile of the income distribution (incomes approximately over $60,000) in county x industry (2-digit NAICS code) cells with nonzero employment for all four income quartiles.
- `emp_subset_reweighted_q1`: Employment level for workers in the bottom quartile of the income distribution (incomes approximately under $27,000), reweighting to match the county x industry (2-digit NAICS code) distribution of workers in the top quartile of the income distribution.
- `emp_subset_reweighted_q2`: Employment level for workers in the second quartile of the income distribution (incomes approximately $27,000 to $37,000), reweighting to match the county x industry (2-digit NAICS code) distribution of workers in the top quartile of the income distribution.
- `emp_subset_reweighted_q3`: Employment level for workers in the third quartile of the income distribution (incomes approximately $37,000 to $60,000) in county x industry cells with nonzero employment for all four income quartiles, reweighting to match the county x industry (2-digit NAICS code) distribution of workers in the top quartile of the income distribution.
- `emp_advance`: Indicator (0 or 1) for whether employment data is a forecasted employment level based on timecard data from Kronos and employees on weekly paycycles from Paychex.
- `emp_size_0`: Employment level for workers at firms with less than or equal to 100 employees.
- `emp_size_100`: Employment level for workers at firms with greater than 100 but less than or equal to 500 employees.
- `emp_size_500`: Employment level for workers at firms with greater than 500 but less than or equal to 800 employees.
- `emp_size_800`: Employment level for workers at firms with greater than 800 employees.
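The quartile series make it straightforward to track how employment diverged across the income distribution. A sketch, with the file name and date columns assumed:

```python
import pandas as pd

emp = pd.read_csv("Employment - National - Daily.csv", na_values=["."])
emp["date"] = pd.to_datetime(emp[["year", "month", "day"]])
# Both series are indexed to Jan 4-31 2020, so the gap is expressed in
# percentage points of baseline employment.
emp["q1_minus_q4"] = emp["emp_incq1"] - emp["emp_incq4"]
print(emp[["date", "q1_minus_q4"]].tail())
```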
### UI Claims
Unemployment insurance claims data from the [Department of Labor](https://oui.doleta.gov/unemploy/DataDashboard.asp) (national and state-level) and numerous individual state agencies (county-level).
- `initclaims_rate_regular`: Number of initial claims per 100 people in the 2019 labor force, Regular UI only
- `initclaims_count_regular`: Count of initial claims, Regular UI only
- `initclaims_rate_pua`: Number of initial claims per 100 people in the 2019 labor force, PUA (Pandemic Unemployment Assistance) only
- `initclaims_count_pua`: Count of initial claims, PUA (Pandemic Unemployment Assistance) only
- `initclaims_rate_combined`: Number of initial claims per 100 people in the 2019 labor force, combining Regular and PUA claims
- `initclaims_count_combined`: Count of initial claims, combining Regular and PUA claims
- `contclaims_rate_regular`: Number of continued claims per 100 people in the 2019 labor force, Regular UI only
- `contclaims_count_regular`: Count of continued claims, Regular UI only
- `contclaims_rate_pua`: Number of continued claims per 100 people in the 2019 labor force, PUA (Pandemic Unemployment Assistance) only
- `contclaims_count_pua`: Count of continued claims, PUA (Pandemic Unemployment Assistance) only
- `contclaims_rate_peuc`: Number of continued claims per 100 people in the 2019 labor force, PEUC (Pandemic Emergency Unemployment Compensation) only
- `contclaims_count_peuc`: Count of continued claims, PEUC (Pandemic Emergency Unemployment Compensation) only
- `contclaims_rate_combined`: Number of continued claims per 100 people in the 2019 labor force, combining Regular, PUA and PEUC claims
- `contclaims_count_combined`: Count of continued claims, combining Regular, PUA and PEUC claims
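Because the combined series are defined as sums of their components, a simple consistency check is possible. The file name is an assumption, and `sum(axis=1)` treats missing components as zero, so mismatches are indicative only:

```python
import pandas as pd

ui = pd.read_csv("UI Claims - State - Weekly.csv", na_values=["."])
parts = ui[["contclaims_count_regular", "contclaims_count_pua",
            "contclaims_count_peuc"]].sum(axis=1)
print((parts - ui["contclaims_count_combined"]).abs().max())
```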
### Womply
Small business openings and revenue data from [Womply](https://www.womply.com/).
- `merchants_all`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020.
- `merchants_inchigh`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in high income (quartile 4 of median income) ZIP codes.
- `merchants_incmiddle`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in middle income (quartiles 2 & 3 of median income) ZIP codes.
- `merchants_inclow`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in low income (quartile 1 of median income) ZIP codes.
- `merchants_ss40`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in transportation (NAICS supersector 40).
- `merchants_ss60`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in professional and business services (NAICS supersector 60).
- `merchants_ss65`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in education and health services (NAICS supersector 65).
- `merchants_ss70`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in leisure and hospitality (NAICS supersector 70).
- `merchants_retail`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31, 2020 in retail businesses (NAICS 2-digit codes 44-45).
- `merchants_food_accommodation`: Percent change in number of small businesses open, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31, 2020 in food and accommodation businesses (NAICS 2-digit code 72).
- `revenue_all`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020.
- `revenue_inchigh`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in high income (quartile 4 of median income) ZIP codes.
- `revenue_incmiddle`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in middle income (quartiles 2 & 3 of median income) ZIP codes.
- `revenue_inclow`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in low income (quartile 1 of median income) ZIP codes.
- `revenue_ss40`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in transportation (NAICS supersector 40).
- `revenue_ss60`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in professional and business services (NAICS supersector 60).
- `revenue_ss65`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in education and health services (NAICS supersector 65).
- `revenue_ss70`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31 2020 in leisure and hospitality (NAICS supersector 70).
- `revenue_retail`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31, 2020 in retail businesses (NAICS 2-digit codes 44-45).
- `revenue_food_accommodation`: Percent change in net revenue for small businesses, calculated as a seven-day moving average, seasonally adjusted, and indexed to January 4-31, 2020 in food and accommodation businesses (NAICS 2-digit code 72).
Note at the County level, these variables are calculated on a weekly basis as a 6-day average of days Monday through Saturday. Omitting Sunday reduces the influence of imputations in small geographic areas, as discussed in the [data documentation](https://github.com/OpportunityInsights/EconomicTracker/blob/main/docs/oi_tracker_data_documentation.md).
### Zearn
Online math learning data from [Zearn](https://www.zearn.org/).
- `engagement`: Average level of students using the platform relative to January 6-February 21 2020.
- `engagement_inclow`: Average level of students using the platform relative to January 6-February 21 2020 for schools in the 25% of ZIP codes with the lowest median income.
- `engagement_incmiddle`: Average level of students using the platform relative to January 6-February 21 2020 for schools in ZIP codes with median income between the 25th and 75th percentiles.
- `engagement_inchigh`: Average level of students using the platform relative to January 6-February 21 2020 for schools in the 25% of ZIP codes with the highest median income.
- `badges`: Average level of student achievements (badges) earned on the platform relative to January 6-February 21 2020.
- `badges_inclow`: Average level of student achievements (badges) earned on the platform relative to January 6-February 21 2020 for schools in the 25% of ZIP codes with the lowest median income.
- `badges_incmiddle`: Average level of student achievements (badges) earned on the platform relative to January 6-February 21 2020 for schools in ZIP codes with median income between the 25th and 75th percentiles.
- `badges_inchigh`: Average level of student achievements (badges) earned on the platform relative to January 6-February 21 2020 for schools in the 25% of ZIP codes with the highest median income.
<!-- List additional variables in comments so they are detected by `verify-csv-columns.py`
- `break_engagement`:
- `break_badges`:
- `break_engagement_inchigh`:
- `break_badges_inchigh`:
- `break_engagement_inclow`:
- `break_badges_inclow`:
- `break_engagement_incmiddle`:
- `break_badges_incmiddle`:
- `imputed_from_cz`:
-->
Note that for every variable listed here, there is a corresponding variable with the prefix `break_` (for example, `break_engagement`). During the period in which schools are on summer or winter break, we record the outcomes in these `break_` variables instead of the usual variables. These numbers are not displayed on the [Economic Tracker](https://tracktherecovery.org) because they do not reliably measure differences in student learning across geography and income groups when many schools are on break.
To ensure privacy, the results for some counties are masked. Where possible, masked county levels values are replaced by commuting zone means, as indicated by the `imputed_from_cz` variable. The masking criteria are explained in further detail in our [data documentation](https://github.com/OpportunityInsights/EconomicTracker/blob/main/docs/oi_tracker_data_documentation.md#online-math-participation).
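When analyzing the county-level files it is often useful to separate directly observed values from commuting-zone imputations. A sketch, assuming the file name and that `imputed_from_cz == 1` marks imputed rows:

```python
import pandas as pd

zearn = pd.read_csv("Zearn - County - Weekly.csv", na_values=["."])
observed = zearn[zearn["imputed_from_cz"] != 1]
imputed = zearn[zearn["imputed_from_cz"] == 1]
print(len(observed), "observed rows;", len(imputed), "imputed rows")
```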
## Policy Milestones
Key state-level policy dates relevant for changes in other series trends and values.
- `statename`: The name of the U.S. state
- `statefips`: 2-digit FIPS code of the U.S. state
- `schools_closed`: The date at which the state ordered all public K-12 schools statewide to physically close
- `nonessential_biz_closed`: The date on which the state government ordered all nonessential businesses to close
- `stayathome_start`: The date on which the state government told residents to stay home, save for excepted activities
- `regional_stayathome_end`: The date on which the state’s order telling residents to stay home expired, was lifted, or relaxed for one or more regions in the state
- `statewide_stayathome_end`: The date on which the state’s order telling residents to stay home expired, was lifted, or changed from mandatory to recommended. Either for the entire state all at once, or for the very last locality with a remaining stay at home order.
- `regional_biz_opened`: The date on which the state began reopening significant businesses (typically in-store retail or non-essential manufacturing) for one or more regions in the state
- `statewide_biz_opened`: The date on which the state began reopening significant businesses (typically in-store retail or non-essential manufacturing). Either for the entire state all at once, or for the very last locality which hadn’t yet reopened any businesses.
- `regional_biz_reclosed`: The date on which the state began reclosing businesses (of any sector) that had been reopened previously for one or more regions in the state
- `statewide_biz_reclosed`: The date on which the state began reclosing businesses (of any sector) that had been reopened previously. Either for the entire state all at once, or for the very last locality which hadn’t yet reclosed any businesses.
- `state_milestone#`: The dates on which the state enacted significant new policy milestones that go beyond the existing policy dates tracked. Examples include ending a reclosure, enacting a new regional reopening system, issuing a stay-at-home advisory, or enacting an additional reclosure after a state's first. The column name suffix indicates the chronological order of the milestones within the particular state and matches the `description#` column with the same suffix.
- `description#`: A description of the policy action represented by the `state_milestone#` column with the same suffix number. For instance, `state_milestone1` is described in `description1`.
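One natural use of these dates is measuring how long each state's stay-at-home order lasted. A sketch, with the file name assumed and `pd.to_datetime` left to infer the date format:

```python
import pandas as pd

pol = pd.read_csv("Policy Milestones - State.csv")
start = pd.to_datetime(pol["stayathome_start"], errors="coerce")
end = pd.to_datetime(pol["statewide_stayathome_end"], errors="coerce")
pol["stayathome_days"] = (end - start).dt.days
print(pol[["statename", "stayathome_days"]].head())
```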
| 1257f0056d4d58bd5f578bf5565180d7091873cc | [
"Markdown"
] | 2 | Markdown | NeilMBennett/EconomicTracker | 48b2cbb8f003710f82954af34cce397fa777eab1 | 52662c0fe72e7737199d442a865e8f3df8613787 |
refs/heads/master | <file_sep># Copyright (c) 2018 <NAME>.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if __name__ == "__main__":
app = sys.argv[1]
if app == "models":
from curaPrintTimeEstimator.ModelGenerator import ModelGenerator
ModelGenerator.run()
elif app == "slice":
from curaPrintTimeEstimator.helpers.ModelTimeCalculator import ModelTimeCalculator
ModelTimeCalculator.run()
elif app == "generate":
from curaPrintTimeEstimator.ModelDataGenerator import ModelDataGenerator
ModelDataGenerator.run()
elif app == "estimate":
from curaPrintTimeEstimator.CuraPrintTimeEstimator import CuraPrintTimeEstimator
CuraPrintTimeEstimator().run()
else:
        raise ValueError("Please pass 'models', 'slice', 'generate' or 'estimate' as parameter.")
<file_sep>#!/usr/bin/env python3
from collections import deque
import os
import subprocess
import tempfile
import time
import jinja2
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
class MachineTemplateGenerator:
def __init__(self, file_name: str) -> None:
self.file_name = file_name
self._template = None
def load(self) -> None:
with open(self.file_name, "r", encoding = "utf-8") as f:
data = f.read()
self._template = jinja2.Template(data)
def generate(self, **kwargs) -> str:
return self._template.render(**kwargs)
class Runner:
def __init__(self, templates_dir: str, models_dir: str, results_dir: str) -> None:
self.templates_dir = templates_dir
self.models_dir = models_dir
self.results_dir = results_dir
self._temp_dir = ""
self.model_file_list = []
def start(self) -> None:
# Create a temp dir to generate machine config files
self._temp_dir = tempfile.mkdtemp(prefix = "cura-print-time-estimator-%s-" % time.time())
os.makedirs(self.results_dir, exist_ok = True)
self._get_all_model_files()
self._get_next_template(self.templates_dir)
def _get_all_model_files(self) -> None:
for _, __, file_names in os.walk(self.models_dir):
for file_name in file_names:
if "sphere" in file_name.lower():
continue
file_path = os.path.join(self.models_dir, file_name)
self.model_file_list.append(file_path)
break
def _get_next_template(self, current_dir: str, prefixes = None) -> None:
if prefixes is None:
prefixes = deque()
prefixes.append(os.path.basename(current_dir))
for _, dir_names, file_names in os.walk(current_dir):
template_file_names = [fn for fn in file_names if fn.endswith(".j2")]
for template_file_name in template_file_names:
template_file_path = os.path.join(current_dir, template_file_name)
template_generator = MachineTemplateGenerator(template_file_path)
template_generator.load()
for infill_density in range(0, 105, 10):
template_content = template_generator.generate(infill_sparse_density = infill_density)
machine_file_path = os.path.join(self._temp_dir, "machine.yaml")
with open(machine_file_path, "w", encoding = "utf-8") as f:
f.write(template_content)
for model_file_path in self.model_file_list:
model_file_name = os.path.basename(model_file_path)
                    gcode_file_name = model_file_name.rsplit(".", 1)[0] + ("_infill_%s" % infill_density) + ".gcode"
gcode_file_name = "_".join(prefixes) + "_" + gcode_file_name
self.run_cli(machine_file_path, model_file_path, gcode_file_name)
for dir_name in dir_names:
next_dir = os.path.join(current_dir, dir_name)
                self._get_next_template(next_dir, prefixes = prefixes)
break
prefixes.pop()
def run_cli(self, machine_file_path: str, model_file_path: str, result_file_name: str) -> None:
cmd = ["docker", "run", "-t", "--rm"]
cmd += ["--user", "1000:1000"]
cmd += ["-v", "%s:/srv/machine.yaml" % os.path.abspath(machine_file_path)]
cmd += ["-v", "%s:/srv/model.stl" % os.path.abspath(model_file_path)]
cmd += ["-v", "%s:/srv/results:rw" % os.path.abspath(self.results_dir)]
cmd += ["cura-cli"]
cmd += ["--machine-yaml", "/srv/machine.yaml"]
cmd += ["--model-file", "/srv/model.stl"]
cmd += ["--output-file", "/srv/results/%s" % result_file_name]
a = subprocess.Popen(cmd)
a.communicate()
if __name__ == "__main__":
result_dir = "results"
runner = Runner(
os.path.join(SCRIPT_DIR, "settings"),
os.path.join(SCRIPT_DIR, "models"),
os.path.join(result_dir)
)
runner.start()
<file_sep>[pytest]
testpaths = .
python_files = Test*.py
python_classes = Test
python_functions = test
<file_sep># Copyright (c) 2018 <NAME>.V.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from subprocess import CalledProcessError
from unittest import TestCase
from unittest.mock import patch
from curaPrintTimeEstimator.helpers.ModelTimeCalculator import ModelTimeCalculator
class TestModelTimeCalculator(TestCase):
@patch("curaPrintTimeEstimator.helpers.ModelTimeCalculator.check_output")
def test_slice(self, cura_mock):
with open("tests/fixtures/3D_Printer_test.out", "rb") as f:
cura_mock.return_value = f.read()
tester = ModelTimeCalculator()
result = tester.slice("3D_Printer_test_fixed_stl_3rd_gen.STL", "definition", ["settings1", "settings2"])
self.assertEqual(33111, result)
expected_params = [
"/srv/cura/CuraEngine/build/CuraEngine", "slice", "-v", "-o", "/dev/null",
"-j", "/srv/cura/Cura/resources/definitions/definition.def.json",
"-s", "settings1", "-s", "settings2",
"-e0", "-s", "settings1", "-e0", "-s", "settings2",
"-e0", "-l", "/usr/src/app/models/3D_Printer_test_fixed_stl_3rd_gen.STL",
]
cura_mock.assert_called_once_with(expected_params, stderr=-2)
@patch("curaPrintTimeEstimator.helpers.ModelTimeCalculator.check_output")
def test_slice_invalid_output(self, cura_mock):
with open("tests/fixtures/jet_fighter-error.out", "rb") as f:
cura_mock.return_value = f.read()
tester = ModelTimeCalculator()
with self.assertRaises(ValueError):
tester.slice("3D_Printer_test", "definition", ["settings1", "settings2"])
@patch("curaPrintTimeEstimator.helpers.ModelTimeCalculator.check_output")
def test_slice_error(self, cura_mock):
cura_mock.side_effect = CalledProcessError(2, "cmd", b"error")
tester = ModelTimeCalculator()
with self.assertRaises(CalledProcessError):
tester.slice("3D_Printer_test", "definition", ["settings1", "settings2"])
<file_sep># Copyright (c) 2018 <NAME>.V.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from unittest import TestCase
from unittest.mock import patch
from curaPrintTimeEstimator.ModelDataGenerator import ModelDataGenerator
from curaPrintTimeEstimator.helpers import findModels
class TestModelDataGenerator(TestCase):
maxDiff = None
@patch("curaPrintTimeEstimator.ModelDataGenerator.findModels")
def test_run(self, find_models_mock):
find_models_mock.return_value = ["cube.stl", "sphere.stl"]
ModelDataGenerator().run()
with open(ModelDataGenerator.OUTPUT_FILE) as f:
result = json.load(f)
with open("tests/fixtures/expected_output.json") as f:
expected = json.load(f)
self.assertEqual(expected, result)
def test_findModels(self):
result = list(findModels())
self.assertEqual(200, len(result))
<file_sep># Copyright (c) 2018 <NAME>.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import trimesh
class ModelGenerator:
"""
Generates scaled versions of a model
"""
    SCALES = [1 + i / 10.0 for i in range(1, 91)]  # scales from 1.1 to 10.0 in steps of 0.1
@staticmethod
def run():
generator = ModelGenerator()
generator.generate("cube.stl")
generator.generate("sphere.stl")
def generate(self, model_name) -> None:
for scale in self.SCALES:
file_name = os.path.join("models", model_name)
mesh = trimesh.load(file_name) # type: trimesh.Trimesh
mesh.apply_scale(scale)
name, _, ext = file_name.rpartition(".")
new_file_name = "{0}{1:04.1f}.{2}".format(name, scale, ext)
mesh.export(new_file_name)
logging.info("Generated %s", new_file_name)
<file_sep># Copyright (c) 2018 <NAME>.V.
import os
import re
from typing import Iterable
from Settings import Settings
def findModels() -> Iterable[str]:
"""
Finds the STL files available in the 'models' sub folder.
:return: An iterable of model names.
"""
files = os.listdir("{}/models".format(Settings.PROJECT_DIR))
search = re.compile(r".*\.(stl|obj)", re.IGNORECASE)
for model in sorted(files, key=str.casefold):
if search.match(model):
yield model
<file_sep># Cura Print Time Estimator
This repository is part of a research to try to better estimate the printing time of complex models.
The repository will contain code to gather and process data to create models that predict how long a model will take to be 3D-printed given some specific Cura settings.
## Running it locally
You may run the application with docker:
```
./run_in_docker.sh
```
If you have a Cura environment on your local machine, you may also run the application outside Docker by setting the Cura directory in the environment variable `CURA_DIR` and the path to the CuraEngine executable in the environment variable `CURA_ENGINE`:
```
export CURA_DIR="/cura/"
export CURA_ENGINE="/cura/CuraEngine"
python3 main.py estimate
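# main.py also exposes the other pipeline stages as subcommands:
python3 main.py models    # generate scaled cube/sphere test models
python3 main.py slice     # slice all models and record print times
python3 main.py generate  # compute per-model statistics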
```<file_sep># Copyright (c) 2018 Ultimaker B.V.
<file_sep># Copyright (c) 2018 <NAME>.V.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
import os
from datetime import timedelta
import re
import sys
from subprocess import check_output, STDOUT, CalledProcessError
from typing import List, Dict, Optional, Iterable, Tuple
from Settings import Settings
from curaPrintTimeEstimator.helpers import findModels
class ModelTimeCalculator:
"""
Class responsible for running the cura engine for all models found in the 'models' directory.
The results are parsed and the expected print time is written to an output file.
"""
# which definition files should be used, excluding the .def.json extension.
DEFINITIONS = ("fdmprinter", )
# The file will contain the output of the time estimation (see self.gatherPrintTimeData)
OUTPUT_FILE = "{}/print_times.json".format(Settings.PROJECT_DIR)
def __init__(self):
self.settings = dict(self._findSettings())
@staticmethod
def run() -> None:
"""
Runs the application.
"""
ModelTimeCalculator().gatherData()
def gatherData(self) -> Dict[str, Dict[str, Dict[str, Optional[int]]]]:
"""
Gathers data about the estimated print time for one model, all settings and all definitions.
:return: A dict with the format {
model_name: {
definition: {settings_name: print_time},
}
}.
"""
        settings = self.settings
if os.path.exists(self.OUTPUT_FILE):
with open(self.OUTPUT_FILE) as f:
result = json.load(f)
else:
result = {}
try:
for model in findModels():
result[model] = self.gatherPrintTimeData(model, settings, prev_results=result.get(model))
finally:
with open(self.OUTPUT_FILE, "w") as f:
json.dump(result, f, indent=2)
logging.info("Results written to %s", self.OUTPUT_FILE)
return result
@staticmethod
def _findSettings() -> Iterable[Tuple[str, List[str]]]:
"""
Finds the TXT files available in the 'settings' sub folder.
:return: An iterable of lists of settings each format: (settings_name, settings_parameters).
"""
directory = "{}/settings".format(Settings.PROJECT_DIR)
files = os.listdir(directory)
for name in sorted(files):
if name.endswith(".txt"):
with open("{}/{}".format(directory, name)) as f:
yield name[:-4], f.read().splitlines()
def gatherPrintTimeData(self, model: str, settings: Dict[str, List[str]],
prev_results: Optional[Dict[str, Dict[str, Optional[int]]]]
) -> Dict[str, Dict[str, Optional[int]]]:
"""
Gathers data about the estimated print time for one model, all settings and all definitions.
:param model: The name of the model file, including the extension.
        :param settings: A dict with the settings file name and a list of settings for each of the files.
        :param prev_results: Previously gathered results for this model, used to skip settings that were already sliced.
:return: A dict with the format {definition: {settings_name: print_time}}.
"""
result = prev_results or {}
for definition in self.DEFINITIONS:
result.setdefault(definition, {})
for setting_name, settings_parameters in settings.items():
if result[definition].get(setting_name):
                    logging.info("Model %s, definition %s and settings %s were already sliced, %s seconds to print.",
                                 model, definition, setting_name, result[definition][setting_name])
else:
result[definition][setting_name] = self.slice(model, definition, settings_parameters)
return result
def slice(self, model_name: str, definition: str, settings: List[str]) -> Optional[int]:
"""
Runs the slicer, returning the estimated amount of seconds to print the model.
:param model_name: The name of the model including the extension.
:param definition: The definition file to be used, without the .def.json extension.
:param settings: The extra settings to be passed to the engine.
:return: The amount of seconds Cura expects the printing will take.
"""
logging.info("Slicing %s with definition %s and settings %s", model_name, definition, settings)
arguments = [
Settings.CURA_ENGINE,
"slice", "-v",
"-o", "NUL" if sys.platform == "win32" else "/dev/null",
"-j", os.path.join(Settings.CURA_DIR, "resources", "definitions", "{}.def.json".format(definition)),
]
# Add the global settings
for s in settings:
arguments.extend(["-s", s])
# Add the extruder0 settings
for s in settings:
arguments.extend(["-e0", "-s", s])
arguments.extend(["-e0", "-l", os.path.join(Settings.PROJECT_DIR, "models", model_name)])
try:
output = check_output(arguments, stderr=STDOUT).decode()
except CalledProcessError as err:
if b"Failed to load model:" in err.output:
logging.warning("Cannot load model %s: %s", model_name, err.output)
return None
else:
logging.error(err.output)
raise
return self._parsePrintTime(output)
@staticmethod
def _parsePrintTime(cura_output: str) -> int:
"""
Finds the expected print time in the output from the Cura engine.
See tests/fixtures for examples of the output.
:param cura_output: The output from the Cura Engine CLI.
:return: The amount of seconds found in the output."""
search = re.search(r"Print time: (\d+)\r?\n", cura_output)
if not search:
raise ValueError("Cannot parse the cura output {}".format(cura_output))
result = int(search.group(1))
logging.info("Model will be printed in %s", timedelta(seconds=result))
return result
<file_sep>#!/usr/bin/groovy
// App configuration.
def appName = "cura-print-time-estimator"
def imageTag = "${appName}:${env.BRANCH_NAME}.${env.BUILD_NUMBER}"
node("docker")
{
stage("Checkout")
{
checkout scm
}
// Build the Docker image for this service
stage("Build")
{
sh "docker build --file Dockerfile.tests --tag ${imageTag} ."
sh "docker rmi ${imageTag}"
currentBuild.result = "SUCCESS"
return
}
}
<file_sep>#!/usr/bin/env bash
# Copyright (c) 2018 Ultimaker B.V.
echo " ****** Running tests ****** "
docker build \
--tag cura-print-time-estimator:tests \
--file Dockerfile.tests . \
|| exit $? # TODO: use main image.
echo " ****** Building our tensorflow image ****** "
docker build \
--tag cura-print-time-estimator:local \
--file Dockerfile . \
|| exit $?
echo " ****** Generating test models ****** "
docker run --rm -it \
--volume $PWD:/srv/host \
--entrypoint python3 \
cura-print-time-estimator:local \
main.py models \
|| exit $?
echo " ****** Slicing all models ****** "
docker run --rm -it \
--volume $PWD:/srv/host \
--env CURA_ENGINE_SEARCH_PATH=/srv/cura/Cura/resources/extruders \
--workdir /srv/host \
--entrypoint python3 \
ultimaker/cura:master-20180307 \
main.py slice \
|| exit $?
echo " ****** Generating test data ****** "
docker run --rm -it \
--volume $PWD:/srv/host \
--entrypoint python3 \
cura-print-time-estimator:local \
main.py generate \
|| exit $?
echo " ****** Running neural network to estimate print times ****** "
docker run --rm -it \
--volume $PWD:/srv/host \
--entrypoint python3 \
cura-print-time-estimator:local \
main.py estimate \
|| exit $?
<file_sep># Copyright (c) 2018 <NAME>.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import json
from typing import List, Dict, Tuple, Optional
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from Settings import Settings
from curaPrintTimeEstimator.ModelDataGenerator import ModelDataGenerator
from curaPrintTimeEstimator.helpers import findModels
from curaPrintTimeEstimator.helpers.ModelTimeCalculator import ModelTimeCalculator
from curaPrintTimeEstimator.neuralnetwork.CuraNeuralNetworkModel import CuraNeuralNetworkModel
class CuraPrintTimeEstimator:
"""
Main application file to run the estimator. It includes read the data that was generated in previous steps, train
the NN and make a test/validation process.
"""
# The file that contains the input settings for the training data
MASK_FILE = "{}/mask.json".format(Settings.PROJECT_DIR)
# The file that contains information we gather in a previous step
STATS_FILE = ModelDataGenerator.OUTPUT_FILE
SLICE_FILE = ModelTimeCalculator.OUTPUT_FILE
SETTINGS_DIR = "{}/settings".format(Settings.PROJECT_DIR)
def run(self) -> None:
inputs, targets = self._flattenData(self._getMask())
X_train, X_test, y_train, y_test = train_test_split(inputs, targets, test_size = 0.25)
logging.info("These are the inputs and target for the NN:\nINPUTS: {inputs}\nTARGETS: {targets}"
.format(inputs=inputs, targets=targets))
# Normalize training data
scaler = MinMaxScaler()
X_train_minmax = scaler.fit_transform(X_train)
        X_test_minmax = scaler.transform(X_test)  # reuse the scaler fitted on the training data
neural_network = CuraNeuralNetworkModel(len(inputs[0]), len(targets[0]), [10, 5])
logging.debug("Minimum {min}:".format(min = np.min(inputs, axis = 0)))
logging.debug("Maximum {max}:".format(max = np.max(inputs, axis = 0)))
neural_network.train(X_train_minmax, y_train)
neural_network.validate(X_test_minmax, y_test)
# print("This is the predicted value:", neural_network.predict([[0.49253, 0.6203, 0.0, 0.01316]]))
def _getMask(self) -> Dict[str, Dict[str, bool]]:
"""
Loads the settings we are using for train the the regression algorithm.
:return: The parsed contents of the mask file.
"""
with open(CuraPrintTimeEstimator.MASK_FILE) as f:
return json.load(f)
def _flattenData(self, mask_data: Dict[str, Dict[str, bool]]) -> Tuple[List[List[Optional[float]]], List[List[float]]]:
"""
Organizes the data collected in previous steps in inputs and target values.
:return: A list of values used as the input for the NN and the printing times as the target values
"""
inputs = []
targets = []
with open(CuraPrintTimeEstimator.STATS_FILE) as f:
stats_data = json.load(f)
with open(CuraPrintTimeEstimator.SLICE_FILE) as f:
slice_data = json.load(f)
for model_name in findModels():
if model_name not in stats_data:
logging.warning("Cannot find stats for %s", model_name)
continue
if model_name not in slice_data:
logging.warning("Cannot find print times for %s", model_name)
continue
# Use the statistics that are the same for the same model
model_stats = list(stats_data[model_name][key] for key in mask_data["model_statistics"])
for definition, settings_profiles in slice_data[model_name].items():
for settings_profile, print_time in settings_profiles.items():
if not print_time:
continue
targets.append([print_time / 3600]) # We store print time as a list, in hours
# Take the values from the setting profiles that are in the mask
settings = self._readSettings(settings_profile)
# settings_data = [1 / settings.get(mask_setting) if is_inverse else settings.get(mask_setting) for mask_setting, is_inverse in mask_data["settings"].items()]
settings_data = [settings.get(mask_setting) for mask_setting, is_inverse in mask_data["settings"].items()]
inputs.append(list(model_stats) + settings_data)
return inputs, targets
def _readSettings(self, settings_profile: str) -> Dict[str, float]:
with open("{}/{}.txt".format(self.SETTINGS_DIR, settings_profile)) as s:
            contents = [line.split("=", 1) for line in s.readlines()]  # type: List[List[str]]
return {key.rstrip(): float(value.lstrip()) for key, value in contents}
<file_sep>---
machine:
name: um3
type: ultimaker3
quality_type: normal
extruders:
0:
variant_name: AA 0.4
material_root_id: generic_pla
settings:
infill_sparse_density: {{ infill_sparse_density }}
1:
enabled: false<file_sep># Copyright (c) 2018 Ultimaker B.V.
FROM tensorflow/tensorflow:latest-py3 AS base
# install requirements
RUN pip3 install --upgrade pip==9.0.*
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
# copy files
WORKDIR /srv/host/
ADD . .
CMD ["python3", "main.py", "estimate"]
<file_sep># Copyright (c) 2018 <NAME>.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from typing import Dict, Union
import trimesh
class ModelStatisticsCalculator:
"""
Class responsible for calculating statistics about models.
"""
def __init__(self):
trimesh.util.attach_to_log()
def read(self, model: str) -> Dict[str, Union[int, float]]:
"""
Gathers statistics about the model.
:param model: The name of the model file including the extension.
:return: The statistics about the model with format: {name: value}.
"""
file_name = 'models/{}'.format(model)
file_size = os.path.getsize(file_name)
mesh = trimesh.load(file_name) # type: trimesh.Trimesh
return {
"volume": mesh.volume / 1000, # in cm3
"surface_area": mesh.area / 100, # in cm2
"area_faces": mesh.area_faces.size,
"box_volume": mesh.bounding_box.volume / 1000, # in cm3
"edges": mesh.edges.size,
"mass": mesh.mass,
"vertices": mesh.vertices.size,
"file_size": file_size,
"volume_by_surface_area": mesh.volume / mesh.area,
}
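

if __name__ == "__main__":
    # Minimal usage sketch: assumes a file exists at ./models/cube.stl,
    # matching the relative path convention used by read() above.
    calc = ModelStatisticsCalculator()
    print(calc.read("cube.stl"))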
<file_sep># Copyright (c) 2018 <NAME>.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
from typing import Dict, Union
from Settings import Settings
from curaPrintTimeEstimator.helpers import findModels
from curaPrintTimeEstimator.helpers.ModelStatisticsCalculator import ModelStatisticsCalculator
class ModelDataGenerator:
"""
Main application file to generate data for the Cura Print time estimator.
"""
# The file will contain the output of the time estimation (see self.gatherPrintTimeData)
OUTPUT_FILE = "{}/model_statistics.json".format(Settings.PROJECT_DIR)
# The class responsible for calculating statistics about the model.
stats_calc = ModelStatisticsCalculator()
@staticmethod
def run() -> None:
"""
Runs the application.
"""
ModelDataGenerator().gatherData()
    def gatherData(self) -> Dict[str, Dict[str, Union[int, float]]]:
"""
Gathers data about the estimated print time for one model, all settings and all definitions.
:return: A dict with the format {
model_name: {See `ModelStatisticsCalculator`}
}.
"""
result = {model: self.stats_calc.read(model) for model in findModels()}
with open(self.OUTPUT_FILE, "w") as f:
json.dump(result, f, indent=2)
logging.info("Results written to %s", self.OUTPUT_FILE)
return result
<file_sep># Copyright (c) 2018 <NAME>.V.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
logging.basicConfig(format="%(asctime)s [%(levelname)s] %(module)s:%(lineno)s: %(message)s",
level=os.getenv("LOGGING_LEVEL", "DEBUG"))
class Settings:
"""
Keeps the application settings.
"""
PROJECT_DIR = os.path.dirname(os.path.realpath(__file__))
CURA_DIR = os.getenv("CURA_DIR", "/srv/cura/Cura")
CURA_ENGINE = os.getenv("CURA_ENGINE", "/srv/cura/CuraEngine/build/CuraEngine")
<file_sep># Copyright (c) 2018 <NAME>.V.
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from typing import List
import math
import tensorflow as tf
from Settings import Settings
class CuraNeuralNetworkModel:
"""
Creates a neural network
"""
LOG_FILE = "{}/logs/train.log".format(Settings.PROJECT_DIR)
CURA_NN_FILE = "{}/output/cura_datamodel.ckpt".format(Settings.PROJECT_DIR)
def __init__(self, feature_nr: int, output_nr: int, hidden_layer_neuron_nr: List[int] = None):
"""
:param feature_nr: indicates the number of inputs
:param output_nr: indicates the number of outputs
:param hidden_layer_neuron_nr: indicates the number of neurons that are in the hidden layer
"""
# Create the input and output variables
logging.debug("Creating a NeuralNetwork with {feature_nr} inputs, {output_nr} outputs and {layers} hidden layers with "
"{hidden_nr} neurons".format(feature_nr = feature_nr, output_nr = output_nr, layers = len(hidden_layer_neuron_nr), hidden_nr = hidden_layer_neuron_nr))
self.input = tf.placeholder(tf.float32, [None, feature_nr], name = "input")
self.target = tf.placeholder(tf.float32, [None, output_nr], name = "target")
# Construct the NN with several hidden layers as indicated in the input variable
hidden_input = self.input
hidden_out = self.input
hidden_layer_nr = feature_nr
input_nr = hidden_input._shape_as_list()[1]
count = 0
mean = 1.0
std_dev = 0.1
if hidden_layer_neuron_nr is not None:
for hidden_layer_nr in hidden_layer_neuron_nr:
                count += 1
# Create connections from the input layer to the hidden layer
W = tf.Variable(tf.truncated_normal([input_nr, hidden_layer_nr], mean = mean, stddev = std_dev), name = "W{}".format(count))
b = tf.Variable(tf.truncated_normal([hidden_layer_nr], mean = mean, stddev = std_dev), name = "b{}".format(count))
                hidden_out = tf.nn.relu(tf.add(tf.matmul(hidden_input, W), b))  # ReLU activation of the hidden layer. TODO: test with tanh or other activation functions
tf.summary.histogram("activation_{}".format(count), hidden_out)
hidden_input = hidden_out
input_nr = hidden_layer_nr
# Create connections from the hidden layer to the output layer
self.WOut = tf.Variable(tf.truncated_normal([hidden_layer_nr, output_nr], mean = mean, stddev = std_dev), name = "WOut")
self.bOut = tf.Variable(tf.truncated_normal([output_nr], mean = mean, stddev = std_dev), name = "bOut")
self.output = tf.nn.relu(tf.add(tf.matmul(hidden_out, self.WOut), self.bOut), name = "output") # output value
        tf.summary.histogram("activationOut", self.output)
# Function used to calculate the cost between the right output and the predicted output
self.cost_function = tf.reduce_mean(tf.square(tf.subtract(self.target, self.output)))
# self.accuracy = tf.metrics.mean_absolute_error(self.target, self.output)
tf.summary.histogram("cost", self.cost_function)
def train(self, data_train: List[List[float]], target_train: List[List[float]], learning_rate: float = 0.0001,
epochs: int = 10, batch_size: int = 10):
logging.info("################# TRAINING #################")
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(self.cost_function)
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize the variables
try:
saver.restore(sess, self.CURA_NN_FILE)
logging.debug("Restoring data from {}".format(self.CURA_NN_FILE))
            except Exception:
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(self.LOG_FILE, sess.graph)
# Number of batches that will be used for training
batch_nr = int(len(target_train) / batch_size)
# Train the NN for the number of epochs
counter = 0
for epoch in range(epochs):
avg_cost = 0
for index in range(batch_nr):
counter += 1
# Split the training dataset in batches
data_batch = data_train[index * batch_size : min((index + 1) * batch_size, len(data_train))]
target_batch = target_train[index * batch_size : min((index + 1) * batch_size, len(target_train))]
merge = tf.summary.merge_all()
logging.debug("Input {}".format(data_batch[0]))
logging.debug("Estimated output before train {est} ?= {target}".format(
est = sess.run(self.output, feed_dict={self.input: data_batch})[0], target = target_batch[0]))
# Actually train the NN with the provided data
summary, optimizer_result, cost = sess.run([merge, optimizer, self.cost_function], feed_dict = {
self.input: data_batch, self.target: target_batch
})
train_writer.add_summary(summary, counter)
avg_cost += cost / batch_nr
if math.isnan(cost):
return
if (epoch + 1) % 10 == 0 and index == (batch_nr - 1):
w_value = sess.run(self.WOut)
b_value = sess.run(self.bOut)
estimation = sess.run(self.output, feed_dict={self.input: data_batch})
logging.warning("############### Epoch: {epoch} - cost = {cost:.5f}".format(epoch = epoch + 1, cost = cost))
logging.debug("Estimation: {weights} + {bias} = {est} <> {target}".format(
weights = w_value, bias = b_value, est = estimation[0], target = target_batch[0]))
# logging.debug("Accuracy: {acc}".format(acc = accuracy))
# Store the training data
save_path = saver.save(sess, self.CURA_NN_FILE)
logging.warning("Model file saved in {path}".format(path = save_path))
def validate(self, data_test: List[List[float]], target_test: List[List[float]]):
logging.info("################### TEST ###################")
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize the variables
try:
saver.restore(sess, self.CURA_NN_FILE)
            except Exception:
logging.error("No model file found in {path}. Can't continue the validation.".format(path = self.CURA_NN_FILE))
return
# Validate the NN with the provided test data
logging.debug("{data_test} and {target_test}".format(data_test = data_test, target_test = target_test))
logging.debug("{output}".format(output = sess.run(self.cost_function, feed_dict = {self.input: data_test, self.target: target_test})))
def predict(self, data_predict: List[List[float]]) -> List[List[float]]:
logging.info("################ PREDICTION ################")
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize the variables
try:
saver.restore(sess, self.CURA_NN_FILE)
# Get the variables from the stored NN
graph = tf.get_default_graph()
input = graph.get_tensor_by_name("input:0")
output = graph.get_tensor_by_name("output:0")
# Validate the NN with the provided test data
predicted_value = sess.run(output, feed_dict = {input: data_predict})
logging.debug("{output}".format(output = predicted_value))
except Exception as e:
logging.error("No model file found in {path}. Can't continue the prediction. Exception:\n{exc}".format(path = self.CURA_NN_FILE, exc=str(e)))
return [[]]
return predicted_value
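

if __name__ == "__main__":
    # Usage sketch mirroring CuraPrintTimeEstimator.run(): a network with
    # 4 input features, 1 output, and hidden layers of 10 and 5 neurons.
    # train()/validate() expect normalized feature rows and print times in hours.
    network = CuraNeuralNetworkModel(4, 1, [10, 5])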
| 76021104926572f83921312718331daa98a66c66 | [
"Shell",
"INI",
"Markdown",
"Groovy",
"Dockerfile",
"Jinja",
"Python"
] | 19 | Shell | developer69K/CuraPrintTimeEstimator | 32165f3b7e21ca4fc74b580749e46969e7053fce | 456859f117af42ff5802d1e64d29e6d1b44d4c7b |
refs/heads/master | <file_sep>import React, { useState } from 'react';
import foodsJSON from "./foods.json";
import FoodBox from "./components/FoodBox"
import AddFoodForm from "./components/AddFoodForm"
function App() {
const [foodData, setFoodData] = useState(foodsJSON);
const [useForm, setUseForm] = useState(foodsJSON);
const addNewFood = (newFood) => {
const updateFoodData = [ ...foodData, newFood ];
const updateFormData = [ ...useForm, newFood ];
setFoodData(updateFoodData);
setUseForm(updateFormData);
};
return (
<div className="App">
{foodData.map((food)=> {
return (
<FoodBox
food={food} key={food.name}/>
)
})}
<AddFoodForm food={foodData} addFood_func={addNewFood} />
</div>
);
}
export default App;
| f93e9d2497990ab50dba83b1c52444c46b0f999e | [
"JavaScript"
] | 1 | JavaScript | tm4gtchi/lab-react-ironnutrition | b43da7da801f039f88f51d45e10bf97437124509 | eec650e690e108369f12e572790ce2ecb78e449d |
refs/heads/master | <file_sep>import queries
import datetime
def active_users(executor):
    """Return all non-bot nicks that have been active in the past week."""
    nicks = queries.all_nicks(executor)
    filtered_nicks = list()
    for nick in nicks:
        if is_user_active(executor, nick) and not is_user_a_bot(nick):
            filtered_nicks.append(nick)
    return filtered_nicks


def is_user_active(executor, nick):
    """A nick is considered active if it has more than 15 distinct lines in the past week."""
    time = datetime.timedelta(weeks=1)
    date = datetime.datetime.utcnow()
    date = date - time
    num = queries.get_distinct_line_count_since_date(executor, nick, date.isoformat())
    return num > 15


def is_user_a_bot(nick):
    """Bot nicks are identified by a trailing '^'."""
    return nick[-1] == '^'
| bdc914c42d1ad44737748b3d933df27f746d46e3 | [
"Python"
] | 1 | Python | andrewnguyen/rehaiku-bot | 71bafc2691e72ef761a28404380c0191139d8186 | 099bb4c65d6a71c5afe45c607923a1c5d93f90d7 |
refs/heads/master | <repo_name>PramodDutta/Selenium-Testcases-in-Java-with-TestNG<file_sep>/test-output/old/Suite1/Registration Test1.properties
[SuiteResult context=Registration Test1]<file_sep>/src/com/scrolltest/testng/BaseClass.java
package com.scrolltest.testng;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
public class BaseClass {
WebDriver driver;
@BeforeMethod
public void setUp()
{
driver = new FirefoxDriver();
}
@AfterMethod
public void tearDown()
{
//driver.close();
driver.quit();
}
}
<file_sep>/README.md
# README #
Selenium Test Cases in Java with TestNG:
We are going to test the demo website “http://demoaut.com/mercuryregister.php” and write a test case
that performs a registration on it.
### What is this repository for? ###
* Quick Demo
### How do I get set up? ###
Instructions: http://scrolltest.com/?p=189
#### Final Results
![alt tag](http://imageshack.com/a/img661/9491/C38sbT.png)
| 84a7702fdfd1ed56215b2c3aabfb64d1adc9faa4 | [
"Java",
"Markdown",
"INI"
] | 3 | Java | PramodDutta/Selenium-Testcases-in-Java-with-TestNG | 755f78ee58b8a60c72448dad4c921a3bc99aa41a | d896b631ad5c74ca5c1522c60ded7e5fe16cfb8a |
refs/heads/master | <file_sep># mata
Learning
| c78c28306a3150e27e5d2a5243cd87d7e9969a6b | [
"Markdown"
] | 1 | Markdown | Nyamata/mata | 69e7452bdda09fe5df062b3a1cb9e649a3855cfe | 1cd4ef4b9f01cf637b3760523464da922de7dd37 |
refs/heads/master | <file_sep>package model;
import java.awt.Rectangle;
/**
* Represents a segment on the Snake.
* @author <NAME>.
*/
public class Block {
private int x, y, length;
private Rectangle rect;
/**
* Instantiates a block.
* @param x - x coordinate
* @param y - y coordinate
* @param length - length of block
*/
public Block(int x, int y, int length) {
this.x = x;
this.y = y;
this.length = length;
rect = new Rectangle(x, y, length, length);
}
public int getX() { return x; }
public int getY() { return y; }
public int getLen() { return length; }
public Rectangle getRect() { return rect; }
public void setX(int x) {this.x = x;}
public void setY(int y) {this.y = y;}
public void update() {
updateRect();
}
private void updateRect() {
rect.setBounds(x, y, length, length);
}
}<file_sep>package state;
import java.awt.Graphics;
import java.awt.event.KeyEvent;
import java.awt.event.MouseEvent;
import main.GameMain;
public abstract class State {
//Initializes new state
public abstract void init();
//Update current state - called by game loop
public abstract void update();
//Loads images for current state - called by game loop
public abstract void render( Graphics g );
//User input - called when user clicks
public abstract void onClick( MouseEvent e );
//User input - called when user presses key
public abstract void onKeyPress( KeyEvent e );
//User input - called when user releases key
public abstract void onKeyRelease( KeyEvent e );
public void setCurrentState( State newState ) {
GameMain.sGame.setCurrentState( newState );
}
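
/*
 * A minimal sketch of a concrete State, assuming a hypothetical menu screen,
 * to show how the game-loop contract above is typically filled in. The
 * commented-out transition names a PlayState that is illustrative only and
 * not part of this repository's shown sources.
 */
class MenuState extends State {
	@Override public void init() { /* load menu resources here */ }
	@Override public void update() { /* nothing animates on the menu */ }
	@Override public void render( Graphics g ) { g.drawString( "Press Enter", 100, 100 ); }
	@Override public void onClick( MouseEvent e ) { }
	@Override public void onKeyPress( KeyEvent e ) {
		if ( e.getKeyCode() == KeyEvent.VK_ENTER ) {
			// setCurrentState( new PlayState() ); // hand off via the base-class helper
		}
	}
	@Override public void onKeyRelease( KeyEvent e ) { }
}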
}
<file_sep>package model;
import main.GameMain;
import java.util.LinkedList;
/**
 * The snake, stored as a linked list of blocks. Movement works by prepending
 * a new head block each tick and dropping the tail, so the body follows the head.
 */
public class Snake {
private LinkedList<Block> snake;
private Block head, tail;
private int size;
private final static int MOVE_SPEED = 15;
public final static int BLOCK_SIZE = 15;
private boolean isLoser;
private boolean isMovingUp, isMovingDown, isMovingLeft, isMovingRight;
public Snake() {
snake = new LinkedList<>();
snake.add(new Block(400, 225, BLOCK_SIZE));
head = snake.getFirst();
tail = snake.getLast();
isMovingUp = false;
isMovingDown = false;
isMovingLeft = false;
isMovingRight = false;
isLoser = false;
size = 1;
}
	// Grows the snake by one block placed behind the tail, opposite the
	// current direction of travel (the final else covers the left-moving case).
	public void add() {
Block temp;
if (isMovingUp) {
temp = new Block(tail.getX(),
tail.getY() + BLOCK_SIZE, BLOCK_SIZE);
} else if (isMovingDown) {
temp = new Block(tail.getX(),
tail.getY() - BLOCK_SIZE, BLOCK_SIZE);
} else if (isMovingRight) {
temp = new Block(tail.getX() - BLOCK_SIZE,
tail.getY(), BLOCK_SIZE);
} else {
temp = new Block(tail.getX() + BLOCK_SIZE,
tail.getY(), BLOCK_SIZE);
}
snake.add(temp);
tail = snake.getLast();
size++;
}
public void update() {
if (isMovingUp) {
accelUp();
} else if (isMovingDown) {
accelDown();
} else if (isMovingLeft) {
accelLeft();
} else if (isMovingRight) {
accelRight();
}
checkCollision();
for(Block b: snake) {
b.update();
}
}
	// Flags a loss when the head leaves the board or overlaps any body block.
	public void checkCollision() {
if (head.getX() < 0 ||
head.getY() < 0 ||
head.getX() > (GameMain.GAME_WIDTH - BLOCK_SIZE) ||
head.getY() > (GameMain.GAME_HEIGHT - BLOCK_SIZE)) {
isLoser = true;
}
for (Block b : snake) {
if (b != head && head.getRect().intersects(b.getRect())) {
isLoser = true;
}
}
}
public void accelUp() {
setDirection(true, false, false, false);
snake.addFirst( new Block(head.getX(),
head.getY() - MOVE_SPEED, BLOCK_SIZE));
snake.removeLast();
updateHeadTail();
}
public void accelDown() {
setDirection(false, true, false, false);
snake.addFirst( new Block(head.getX(),
head.getY() + MOVE_SPEED, BLOCK_SIZE));
snake.removeLast();
updateHeadTail();
}
public void accelRight() {
setDirection(false, false, true, false);
snake.addFirst( new Block(head.getX() + MOVE_SPEED,
head.getY(), BLOCK_SIZE));
snake.removeLast();
updateHeadTail();
}
public void accelLeft() {
setDirection(false, false, false, true);
snake.addFirst( new Block(head.getX() - MOVE_SPEED,
head.getY(), BLOCK_SIZE));
snake.removeLast();
updateHeadTail();
}
private void updateHeadTail() {
head = snake.getFirst();
tail = snake.getLast();
}
private void setDirection(boolean up, boolean down,
boolean right, boolean left) {
isMovingUp = up;
isMovingDown = down;
isMovingRight = right;
isMovingLeft = left;
}
public Block getHead() {
return head;
}
public Block get(int index) {
return snake.get(index);
}
public int getSize() {
return size;
}
public boolean isLoser() {
return isLoser;
}
public boolean isMovingUp() {
return isMovingUp;
}
public boolean isMovingDown() {
return isMovingDown;
}
public boolean isMovingRight() {
return isMovingRight;
}
public boolean isMovingLeft() {
return isMovingLeft;
}
}
<file_sep># BlockSnake
A recreation of the classic game of snake, using Java.
| 1f100a924a9d2e1238952fc7974b6bd270c2eef2 | [
"Java",
"Markdown"
] | 4 | Java | ashah023/BlockSnake | a0619ad3673d812471c2a78b86b13e5306908c73 | a5e5dea2bff00e5d853570cfabce68962248bfe5 |
refs/heads/main | <file_sep>using System;
using System.Collections.Generic;
using System.Text;
using student;
using group;
using project;
namespace test
{
public class Test
{
static void Main()
{
            //Create projects
            Project pr1 = new Project("Public Transit Management", "Advanced", "Describes the GRT bus mobility system");
            Project pr2 = new Project("Games", "Moderate", "Small tic-tac-toe game");
            Project pr3 = new Project("Calculator", "Basic", "Calculates add, sub, mul, div");
            //Create groups
            Group gr1 = new Group("2", "Small");
            Group gr2 = new Group("4", "Medium");
            Group gr3 = new Group("5", "Large");
            //Create student info
            Student std1 = new Student("Saurav", "8633551");
            Student std2 = new Student("Anjali", "356849");
            Student std3 = new Student("Mohan", "56464564");
            Student std4 = new Student("Neha", "56464564");
            Student std5 = new Student("Anisha", "56464564");
//Assigning projects to the groups
gr1.AddSupportedProject(pr3);
gr2.AddSupportedProject(pr2);
gr3.AddSupportedProject(pr1);
//Including student to the group
std1.AddSupportedGroup(gr1);
std3.AddSupportedGroup(gr2);
std2.AddSupportedGroup(gr3);
std4.AddSupportedGroup(gr1);
//Print student’s name, ID, group name, and project description
Console.WriteLine(std1);
Console.WriteLine(std2);
Console.WriteLine(std3);
Console.WriteLine(std4);
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Text;
using project;
using student;
namespace group
{
public class Group
{
private string NumberOfStudents;
private string GroupName;
private HashSet<Project> supportedProjects;
public Group(string NumberOfStudents,string GroupName)
{
this.NumberOfStudents = NumberOfStudents;
this.GroupName = GroupName;
this.supportedProjects = new HashSet<Project>();
}
public void AddSupportedProject(Project pr)
{
this.supportedProjects.Add(pr);
}
public override string ToString()
{
StringBuilder result = new StringBuilder();
result.Append("Number of Student: " +NumberOfStudents + "\n");
result.Append("Group Name: " + this.GroupName + "\n");
foreach (Project pr in this.supportedProjects)
{
result.Append(pr);
result.Append("\n");
}
result.Append("----------------------\n");
return result.ToString();
}
}
}
<file_sep># 8050_Assign-2
Assigns projects to groups and students to groups.
<file_sep>using System;
using System.Text;
using System.Collections.Generic;
using group;
using project;
namespace student
{
public class Student
{
private string name;
private string ID;
private HashSet<Group> supportedGroups;
public Student(string name, string ID)
{
this.name = name;
this.ID = ID;
this.supportedGroups = new HashSet<Group>();
}
public void AddSupportedGroup(Group gr)
{
this.supportedGroups.Add(gr);
}
public override string ToString()
{
StringBuilder result = new StringBuilder();
result.Append("Student name: " + this.name + "\n");
result.Append("Student ID: " + this.ID + "\n");
foreach (Group gr in this.supportedGroups)
{
result.Append(gr);
result.Append("\n");
}
result.Append("----------------------\n");
return result.ToString();
}
}
}
<file_sep>using System;
using System.Collections.Generic;
using System.Text;
using group;
using student;
namespace project
{
public class Project
{
private string NameOfProject;
private string Status;
private string Description;
public Project(string NameOfProject, string Status, string Description)
{
this.NameOfProject = NameOfProject;
this.Status = Status;
this.Description = Description;
}
public override string ToString()
{
return "Project Name :" + this.NameOfProject + "\n" + "Staus :" + this.Status + "\n" + "Description :" + this.Description;
}
}
}
| 5b3bc75b0961ce81c63f49073935560ee5d6240f | [
"C#",
"Markdown"
] | 5 | C# | MdSauravKhan/8050_Assign-2 | d38d96f473f8afbcf6469e28da1a0d2c34bfcf52 | 5cf3534cd34a0405097385b007271c96db4c72cf |
refs/heads/master | <repo_name>jason110024/AdviseMe<file_sep>/AdviseMe UT/src/webapp/datastoreObjects/Department.java
package webapp.datastoreObjects;
import java.util.ArrayList;
import com.googlecode.objectify.annotation.Embed;
import com.googlecode.objectify.annotation.Index;
@Index
public class Department implements Comparable<Department>{
String name="No name entered.";
@Embed ArrayList<Course> courseList;
@SuppressWarnings("unused")
private Department(){
this.courseList = new ArrayList<Course>();
}
public Department(String name){
this.name= name;
this.courseList=new ArrayList<Course>();
}
public Department(ArrayList<Course> courseList){
this.courseList=courseList;
}
public Department(String name, ArrayList<Course> courseList){
this.name=name;
this.courseList=courseList;
}
public String getName(){
return this.name;
}
public void addCourse(Course newCourse){
this.courseList.add(newCourse);
}
public ArrayList<Course> getCourseList() {
return courseList;
}
	@Override
	public int compareTo(Department o) {
		// Note: operands are reversed, so sorting yields descending name order
		// (the opposite of Course.compareTo).
		return o.getName().compareTo(this.getName());
	}
}
<file_sep>/AdviseMe UT/war/about1.jsp
<%@ taglib prefix="fn" uri="http://java.sun.com/jsp/jstl/functions"%>
<html>
<head>
<link type="text/css" rel="stylesheet" href="stylesheets/bootstrap.css">
<script src="http://code.jquery.com/jquery.js"></script>
<title>AdviseMe-About</title>
</head>
<body>
<%
String id = null;
String picurl = null;
String first = null;
String last = null;
String isLoggedIn = null;
	HttpSession mysession = request.getSession(false);
	// getSession(false) may return null for first-time visitors; guard before reading attributes.
	if(mysession != null && mysession.getAttribute("id") != null){
		id = (String) mysession.getAttribute("id"); // the login servlets store the user id under "id"
		picurl = (String) mysession.getAttribute("pic");
		first = (String) mysession.getAttribute("first");
		last = (String) mysession.getAttribute("last");
		isLoggedIn = (String) mysession.getAttribute("isLoggedIn");
		pageContext.setAttribute("id", id);
		pageContext.setAttribute("pic",picurl);
		pageContext.setAttribute("first", first);
		pageContext.setAttribute("last", last);
		pageContext.setAttribute("isLoggedIn", isLoggedIn);
		pageContext.setAttribute("guest","false");
	}else{
		pageContext.setAttribute("guest", "true");
	}
%>
<img id="banner" src="Header.png" alt="Banner Image" height="84" width="263"/>
<div class="”container”">
<div class="navbar">
<div class="navbar-inner">
<div class="container">
<ul class="nav">
<li><a href="home.jsp">Home</a></li>
<li class="active"><a href="about.jsp">About</a></li>
<li><a href="courses.jsp">Courses</a></li>
<li><a href="schedule.jsp">Schedule Thing</a></li> <!-- Tentative Title -->
<li><a href="usefulLinks.jsp">Useful Links</a></li>
</ul>
<ul class="nav pull-right">
<ul class="nav">
<li><a href="home.jsp" id=name></a></li>
<li><a class="brand" id=pict href="home.jsp"><img id="profilepic"></a></li>
<li><button type="button" class="btn btn-default" id="loginbuttonref" onclick="window.location.href='login.jsp'">Login</button></li>
</ul>
</ul>
</div>
</div>
</div>
</div>
<div class="container">
<div class="row-fluid">
<div class="span4">
<div class="col-md-4">
<img
src="http://img.photobucket.com/albums/v89/mhking/blog/cat-gun.jpg"
alt="about_pic">
</div>
</div>
<div class="span8">
<div class="col-md-8">
<b>AdviseMe</b> was created in the hopes of making registration for
students at The University of Texas at Austin less stressful. Most
of the time, class descriptions are vague and students aren't aware
of what they're getting into. Often times, students will take
multiple time-consuming classes together, buy books they never end
up using, or get catch unawares on the type of material being
taught in a certain course. <b>AdviseMe</b> is here to resolve all
those problems and make it easier to plan out their schedules.
</div>
</div>
</div>
<br>
<div class="row-fluid">
<div class="span4">
<div class="col-md-4">
<h2>Meet the Team</h2>
</div>
</div>
<div class="span8">
<div class="col-md-8"></div>
</div>
</div>
<div class="row-fluid">
<div class="span4">
<div class="col-md-4">
<ul style="list-style: none;">
<li><img class="lazy"
src="http://images.ak.instagram.com/profiles/profile_7074641_75sq_1389547051.jpg"
data-original="//a.disquscdn.com/dotcom/d-309f716/img/about/headshots/Daniel_Ha.jpg"
style="display: inline;"></li>
<li><img class="lazy"
src="https://scontent-a-dfw.xx.fbcdn.net/hphotos-frc1/t1.0-9/423432_3502781686376_49776637_n.jpg"
data-original="//a.disquscdn.com/dotcom/d-309f716/img/about/headshots/Daniel_Ha.jpg"
style="display: inline;"></li>
<li><img class="lazy"
src="https://scontent-b-dfw.xx.fbcdn.net/hphotos-ash4/t1.0-9/1625728_10152158044867808_59820564_n.jpg"
data-original="//a.disquscdn.com/dotcom/d-309f716/img/about/headshots/Daniel_Ha.jpg"
style="display: inline;"></li>
<li><img class="lazy"
src="https://fbcdn-sphotos-g-a.akamaihd.net/hphotos-ak-prn1/t1.0-9/1527789_10201790003090555_1626646900_n.jpg"
data-original="//a.disquscdn.com/dotcom/d-309f716/img/about/headshots/Daniel_Ha.jpg"
style="display: inline;"></li>
<li><img class="lazy"
src="https://scontent-b-dfw.xx.fbcdn.net/hphotos-ash3/t1.0-9/549587_10201490828445920_1223410407_n.jpg"
data-original="//a.disquscdn.com/dotcom/d-309f716/img/about/headshots/Daniel_Ha.jpg"
style="display: inline;"></li>
</ul>
</div>
</div>
<div class="span8">
<div class="col-md-8">
<ul style="list-style: none;">
<li><NAME></li>
<li><NAME></li>
<li><NAME></li>
<li><NAME></li>
<li><NAME></li>
</ul></div>
</div>
</div>
</div>
<script>
if ("${fn:escapeXml(guest)}" == "false") {
console.log('1');
if("${fn:escapeXml(isLoggedIn)}" == "true"){
console.log('2');
document.getElementById("name").innerHTML = "Welcome, ${fn:escapeXml(first)} ${fn:escapeXml(last)}";
document.getElementById("name").href = "manageaccount.jsp";
document.getElementById("pict").href = "manageaccount.jsp";
document.getElementById("profilepic").src = "${fn:escapeXml(pic)}";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='logout.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Logout";
}else{
console.log('3');
document.getElementById("name").innerHTML = "Welcome, Guest";
document.getElementById("name").href = "home.jsp";
document.getElementById("pict").href = "home.jsp";
document.getElementById("profilepic").src = "";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='login.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Login";
}
} else {
console.log('4');
document.getElementById("name").innerHTML = "Welcome, Guest";
document.getElementById("name").href = "home.jsp";
document.getElementById("pict").href = "home.jsp";
document.getElementById("profilepic").src = "";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='login.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Login";
}
</script>
</body>
</html>
<file_sep>/AdviseMe UT/src/webapp/addServlets/addCourseEdit.java
package webapp.addServlets;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import javax.mail.Message;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.Course;
import webapp.datastoreObjects.CourseEdits;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class addCourseEdit extends HttpServlet{
static{ObjectifyService.register(CourseEdits.class);}
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String courseName = req.getParameter("coursename");
String id = req.getParameter("userID");
String courseTitle = req.getParameter("coursetitle");
String courseDescription = req.getParameter("coursedescription");
String upperDivision = req.getParameter("division");
String professorList = req.getParameter("professorList");
String semesterTaught = req.getParameter("semestersTaught");
String prereqs = req.getParameter("prereqs");
try{
if(courseName==null||courseName.isEmpty()){
throw new Exception("Must provide a valid Course Name!");
}
if(id==null||id.isEmpty()){
throw new Exception("User Id not Passed!");
}
if(courseTitle==null||courseTitle.isEmpty()){
throw new Exception("Must provide a valid Course Title!");
}
if(courseDescription==null||courseDescription.isEmpty()){
throw new Exception("Must provide a valid Course Description!");
}
if(upperDivision==null||upperDivision.isEmpty()){
throw new Exception("Must select Upper/Lower Division!");
}
if(professorList==null||professorList.isEmpty()){
throw new Exception("Must provide professors!");
}
if(semesterTaught==null||semesterTaught.isEmpty()){
throw new Exception("Must provide semesters taught!");
}
if(prereqs==null||prereqs.isEmpty()){
throw new Exception("Must provide Pre-requistites!");
}
boolean upper;
if(upperDivision.equals("upper")){
upper = true;
}else{
upper=false;
}
String change = "User requesting change: " + id+ "\nCourse Name: " +courseName + "\nCourse Title: "+courseTitle+"\nCourse Description: "
+ courseDescription + "\nUpper Division?: " + upperDivision + "\nProfessor List: " +
professorList + "\n Semesters Taught: " + semesterTaught + "\n Prereqs: " + prereqs;
CourseEdits course = new CourseEdits(courseName,id,courseTitle,courseDescription,upper);
course.getProfessorList().add(professorList);
course.getSemesterTaught().add(semesterTaught);
course.getPrereq().add(prereqs);
			ObjectifyService.ofy().save().entity(course).now();
			// Scan the saved edits to recover the generated id for the approval email
			// (after save().now(), Objectify has also populated course.getId() directly).
			List<CourseEdits> temps = ObjectifyService.ofy().load().type(CourseEdits.class).list();
			Iterator<CourseEdits> iterator = temps.iterator();
			Long ids = null;
while(iterator.hasNext()){
CourseEdits temper = iterator.next();
if(temper.getTitle().equals(course.getTitle())&&temper.getCourseName().equals(course.getCourseName())&&temper.getDescription().equals(course.getDescription())&&temper.getPrereq().equals(course.getPrereq())&&temper.getProfessorList().equals(course.getProfessorList())){
ids = temper.getId();
break;
}
}
//Get old course
ObjectifyService.register(Course.class);
List<Course> list = ObjectifyService.ofy().load().type(Course.class).list();
Iterator<Course> iter = list.iterator();
while(iter.hasNext()){
Course temp = iter.next();
if(temp.getCourseName().equals(courseName)){
change+="\n\nOld Course Info: \n" +"Course Name: " +temp.getCourseName() + "\nCourse Title: "+temp.getTitle()+"\nCourse Description: "
+ temp.getDescription() + "\nUpper Division?: " + temp.getUpperDivision() + "\nProfessor List: " +
temp.getProfessorList() + "\n Semesters Taught: " + temp.getSemesterTaught() + "\n Prereqs: " + temp.getPrereq();
break;
}
}
Properties props = new Properties();
change+="\n\n\nTo approve the changes, reply to <EMAIL> with the subject yes " + ids;
change+="\n\n\nTo discard the change, reply to <EMAIL> with the subject no "+ids;
Session session = Session.getDefaultInstance(props,null);
String address = "<EMAIL>";
Message msg = new MimeMessage(session);
try{
msg.setFrom(new InternetAddress("<EMAIL>", "AdviseMe Course Change"));
msg.addRecipient(Message.RecipientType.TO, new InternetAddress(address));
msg.setSubject("Edit for: "+courseName+" Requested");
msg.setText(change);
Transport.send(msg);
}catch(Exception e1){
System.out.println("Was not able to send change to admin");
}
resp.sendRedirect("thankyou.jsp");
} catch (Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
doPost(req,resp);
}
}
<file_sep>/AdviseMe UT/src/webapp/removeServlets/deleteFBUserServlet.java
package webapp.removeServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class deleteFBUserServlet extends HttpServlet{
static{ObjectifyService.register(User.class);}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String fbId = req.getParameter("id");
HttpSession session = req.getSession(false);
session.setAttribute("first", "");
session.setAttribute("last", "");
session.setAttribute("pic", "");
session.setAttribute("id", "");
session.setAttribute("isLoggedIn", "false");
try{
if(fbId==null||fbId.isEmpty()){
throw new Exception("Facebook not returning valid identification. Please relogin.");
}
List<User> users = ofy().load().type(User.class).list();
Collections.sort(users);
			for(User user: users){
				if(user.getfbUserId().equals(fbId)){
					ofy().delete().entity(user).now();
					System.out.println("User: " + fbId + " has been removed.");
					resp.sendRedirect("/home.jsp");
					return; // stop here; otherwise the "not found" exception below fires even on success
				}
			}
			throw new Exception("User account not found in database.");
} catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/cronServlets/userCourseGenerator.java
package webapp.cronServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.ArrayList;
//import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.Course;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class userCourseGenerator extends HttpServlet{
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
ObjectifyService.register(User.class);
List<User> users = ofy().load().type(User.class).list();
ObjectifyService.register(Course.class);
List<Course> courses = ofy().load().type(Course.class).list();
if(users.isEmpty()){
System.out.println("User list was empty at Cron Time.");
return;
}
if(courses.isEmpty()){
System.out.println("Course List was empty at Cron Time");
return;
}
Iterator<Course> temporary = courses.iterator();
while(temporary.hasNext()){
ArrayList<String> newlist = new ArrayList<String>();
temporary.next().setUserTaken(newlist);
}
Iterator<User> userIterator = users.iterator();
while(userIterator.hasNext()){
User user = userIterator.next();
System.out.println(user.getFullName());
ArrayList<String> userCourses = user.getUserClassList();
Iterator<String> userCourseIterator = userCourses.iterator();
while(userCourseIterator.hasNext()){
String userCourse = userCourseIterator.next();
Iterator<Course> courseList = courses.iterator();
while(courseList.hasNext()){
Course tempCourse = courseList.next();
if(tempCourse.getCourseName().equals(userCourse)){
System.out.println("Adding: " + user.getfbUserId() + " to course: " + tempCourse.getCourseName());
if(tempCourse.getUserTaken()==null){
ArrayList<String> temp = new ArrayList<String>();
tempCourse.setUserTaken(temp);
}
tempCourse.getUserTaken().add(user.getfbUserId());
}
					// Saving every course on each pass also persists the cleared
					// lists for courses that no user has taken.
					ofy().save().entity(tempCourse).now();
}
}
}
}
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
doGet(req,resp);
}
}
<file_sep>/AdviseMe UT/src/webapp/serviceServlets/passwordResetServlet.java
package webapp.serviceServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.List;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.PasswordReset;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class passwordResetServlet extends HttpServlet{
static{ObjectifyService.register(PasswordReset.class);}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String key = req.getParameter("key");
String password = req.getParameter("password");
try{
if(key==null||key.isEmpty()){
throw new Exception("Invalid key. Please use the password reset tool to reset your password.");
}
if(password==null||password.isEmpty()){
throw new Exception("Please enter a password");
}
boolean flag = false;
List<PasswordReset> passwords = ofy().load().type(PasswordReset.class).list();
for(PasswordReset passwordss : passwords){
if(passwordss.getKey().toString().equals(key)){
ObjectifyService.register(User.class);
List<User> user = ofy().load().type(User.class).list();
					for(User users: user){
						if(users.getfbUserId().equals(passwordss.getUserId())){
							users.changePassword(password);
							ObjectifyService.ofy().save().entity(users).now();
							flag = true;
						}
					}
					// Consume the reset key so the same link cannot be replayed.
					ofy().delete().entity(passwordss).now();
				}
}
if(!flag){
throw new Exception("Error when resetting password");
}else{
resp.sendRedirect("home.jsp");
}
}catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/addServlets/addFBUserServlet.java
package webapp.addServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import javax.mail.Message;
import javax.mail.Multipart;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeBodyPart;
import javax.mail.internet.MimeMessage;
import javax.mail.internet.MimeMultipart;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import net.tanesha.recaptcha.ReCaptchaImpl;
import net.tanesha.recaptcha.ReCaptchaResponse;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class addFBUserServlet extends HttpServlet {
static {
ObjectifyService.register(User.class);
}
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String FBId = req.getParameter("id");
String FBFirst = req.getParameter("firstname");
String FBLast = req.getParameter("lastname");
String FBEmail = req.getParameter("email");
String username = req.getParameter("username");
String password = req.getParameter("<PASSWORD>");
String remoteAddr = req.getRemoteAddr();
String challenge = req.getParameter("recaptcha_challenge_field");
String response = req.getParameter("recaptcha_response_field");
try{
ReCaptchaImpl reCaptcha = new ReCaptchaImpl();
reCaptcha.setPrivateKey("<KEY>");
ReCaptchaResponse reCaptchaReponse = reCaptcha.checkAnswer(remoteAddr, challenge, response);
if(!reCaptchaReponse.isValid()){
resp.sendRedirect("createaccount.jsp?error=captcha");
}else{
User user;
if(FBFirst==null||FBFirst.isEmpty()){
throw new Exception("Must enter a first name");
}
if(FBLast==null||FBLast.isEmpty()){
throw new Exception("Must enter a last name.");
}
if(FBEmail==null||FBEmail.isEmpty()){
throw new Exception("Must enter an email.");
}
if(password==null||password.isEmpty()){
throw new Exception("Must enter a password.");
}
if(username==null||username.isEmpty()){
throw new Exception("Must enter a username.");
}
List<User> userList = ObjectifyService.ofy().load().type(User.class).list();
				for(User users: userList){
					if(users.getUsername().equals(username)){
						resp.sendRedirect("createaccount.jsp?error=user");
						return; // abort; sendRedirect alone does not stop account creation below
					}
					if(users.getUserEmail().equals(FBEmail)){
						resp.sendRedirect("createaccount.jsp?error=email");
						return;
					}
				}
if(FBId==null||FBId.isEmpty()){
user = new User(FBFirst,FBLast,FBEmail,username,password);
user.setLoginStatus(true);
}else{
user = new User(FBId,FBFirst,FBLast,FBEmail,username,password);
user.setLoginStatus(true);
}
ofy().save().entity(user).now();
User temp = ofy().load().entity(user).get();
FBId=temp.getfbUserId();
HttpSession session = req.getSession(true);
session.setAttribute("first", FBFirst);
session.setAttribute("last", FBLast);
session.setAttribute("id", FBId);
session.setAttribute("isLoggedIn", "true");
Properties props = new Properties();
Session session1 = Session.getDefaultInstance(props,null);
Message msg = new MimeMessage(session1);
String htmlBody = "<!doctype html>"
+ " <html xmlns=\"http://www.w3.org/1999/xhtml\">"
+ " <head>"
+ " <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
+ " <title>Responsive Email Template</title>"
+ " <style type=\"text/css\">"
+ " .ReadMsgBody {width: 100%; background-color: #ffffff;}"
+ " .ExternalClass {width: 100%; background-color: #ffffff;}"
+ " body {width: 100%; background-color: #ffffff; margin:0; padding:0; -webkit-font-smoothing: antialiased;font-family: Arial, Helvetica, sans-serif}"
+ " table {border-collapse: collapse;}"
+ " @media only screen and (max-width: 640px) {"
+ " body[yahoo] .deviceWidth {width:440px!important; padding:0;}"
+ " body[yahoo] .center {text-align: center!important;} "
+ " }"
+ " @media only screen and (max-width: 479px) {"
+ " body[yahoo] .deviceWidth {width:280px!important; padding:0;}"
+ " body[yahoo] .center {text-align: center!important;} "
+ " }"
+ " </style>"
+ " </head>"
+ " <body leftmargin=\"0\" topmargin=\"0\" marginwidth=\"0\" marginheight=\"0\" yahoo=\"\fix\" style=\"font-family: Arial, Helvetica, sans-serif\">"
+ " <!-- Wrapper -->"
+ " <table width=\"100%\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\">"
+ " <tr>"
+ " <td width=\"100%\" valign=\"top\">"
+ " <!--Start Header-->"
+ " <table width=\"700\" bgcolor=\"#fff\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td style=\"padding: 6px 0px 0px\">"
+ " <table width=\"650\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td width=\"100%\" >"
+ " <!--Start logo-->"
+ " <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"left\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td class=\"center\" style=\"padding: 20px 0px 10px 0px\">"
+ " <a href=\"http://advisemeut.appspot.com/\"><img src=\"http://advisemeut.appspot.com/assets/img/logo1-default.png\"></a>"
+ " </td>"
+ " </tr>"
+ " </table><!--End logo-->"
+ " </td>"
+ " </tr>"
+ " </table>"
+ " </td>"
+ " </tr>"
+ " </table> "
+ " <!--End Header-->"
+ " <!--Start Top Block-->"
+ " <table width=\"100%\" bgcolor=\"#e67e22\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td>"
+ " <table width=\"700\" bgcolor=\"#e67e22\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td width=\"100%\">"
+ " <!--Left box-->"
+ " <table width=\"100%\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td class=\"center\" style=\"font-size: 16px; color: #ffffff; font-weight: bold; text-align: left; font-family: Arial, Helvetica, sans-serif; line-height: 25px; vertical-align: middle; padding: 50px 0px 0 20px; \">"
+ " Thank you for joining AdviseMe! "
+ " </td>"
+ " </tr>"
+ " <tr>"
+ " <td class=\"center\" style=\"font-size: 14px; color: #ffffff; font-weight: bold; text-align: left; font-family: Arial, Helvetica, sans-serif; line-height: 25px; vertical-align: middle; padding: 20px; \" >"
+ " Now you can help other users with their registration questions! Please feel free to add courses that you have taken, rate a course, edit a course description, or more! "
+ " </td>"
+ " </tr>"
+ " <tr>"
+ " <td class=\"center\" style=\"font-size: 12px; color: #ffffff; font-weight: bold; text-align: left; font-family: Arial, Helvetica, sans-serif; line-height: 20px; vertical-align: middle; padding: 50px 10px 0; \">"
+ " Stay Connected With AdviseMe! "
+ " </td>"
+ " </tr>"
+ " <tr>"
+ " <td class=\"center\" style=\"font-size: 12px; color: #ffffff; font-weight: bold; text-align: left; font-family: Arial, Helvetica, sans-serif; line-height: 20px; vertical-align: middle; padding: 20px; \">"
+ " <button class=\"btn-u btn-u-blue\" type=\"button\" onclick=\"window.location.href='http://jasona-ee461l-webappblog.appspot.com/''\">Blog</button>"
+ " <button class=\"btn-u btn-u-blue\" type=\"button\" onclick=\"window.location.href='https://twitter.com/AdviseMeUT'\">Twitter</button>"
+ " <button class=\"btn-u btn-u-blue\" type=\"button\" onclick=\"window.location.href='https://www.youtube.com/channel/UCTE-dQdEBZQpOIphPIec_Og'\">YouTube</button>"
+ " </td>"
+ " </tr>"
+ " </table><!--End Left box-->"
+ " <!-- Right box -->"
+ " </td>"
+ " </tr>"
+ " </table>"
+ " </td>"
+ " </tr>"
+ " </table>"
+ " <!--End Top Block-->"
+ " <!-- Footer -->"
+ " <table width=\"700\" bgcolor=\"#fff\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td>"
+ " <table width=\"700\" border=\"0\" cellpadding=\"0\" cellspacing=\"0\" align=\"center\" class=\"deviceWidth\">"
+ " <tr>"
+ " <td class=\"center\" style=\"font-size: 12px; color: #687074; font-weight: bold; text-align: center; font-family: Arial, Helvetica, sans-serif; line-height: 20px; vertical-align: middle; padding: 10px 50px 30px; \" >"
+ " 2014 © AdviseMe. ALL Rights Reserved. "
+ " </td>"
+ " </tr>"
+ " </table>"
+ " </td>"
+ " </tr>"
+ " </table>"
+ " <!--End Footer-->"
+ " <div style=\"height:15px\"> </div><!-- divider -->"
+ " </td>"
+ " </tr>"
+ " </table> "
+ " <!-- End Wrapper -->"
+ " </body>"
+ " </html>";
Multipart mp = new MimeMultipart();
MimeBodyPart htmlPart = new MimeBodyPart();
htmlPart.setContent(htmlBody, "text/html");
mp.addBodyPart(htmlPart);
msg.setContent(mp);
msg.setSubject("Welcome to AdviseMe!");
try{
msg.setFrom(new InternetAddress("<EMAIL>", "Welcome to Advise Me!"));
msg.addRecipient(Message.RecipientType.TO, new InternetAddress(FBEmail));
Transport.send(msg);
}catch(Exception e){
System.out.println("Was not able to send change to admin");
}finally{
resp.sendRedirect("/addusercourses.jsp?id="+FBId);
}
}
} catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/addServlets/addCollegeServlet.java
package webapp.addServlets;
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
public class addCollegeServlet extends HttpServlet{
private static final long serialVersionUID = 1L;
//static{ObjectifyService.register(School.class);}
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
/* String schoolName = req.getParameter("schoolname");
String collegeName = req.getParameter("collegename");
if(schoolName==null){
//Should be impossible?
}else if(schoolName.isEmpty()){
//Should be impossible?
}else if(collegeName==null){
//Should be impossible?
}else if(collegeName.isEmpty()){
//Should be impossible?
}else{
//TODO: Need to create check to make sure not adding duplicate college within school
List<School> schoolList=ObjectifyService.ofy().load().type(School.class).list();
Collections.sort(schoolList);
College college = new College(collegeName);
for(School school: schoolList){
if(school.getName().equals(schoolName)){
school.addCollege(college);
ofy().save().entity(school).now();
resp.sendRedirect("/home.jsp");
}
}
resp.sendRedirect("/home.jsp"); //TODO: Really should redirect to error page showing that entity was not added.
}*/
}
}
<file_sep>/README.md
AdviseMe
========
EE 461L Project
<file_sep>/AdviseMe UT/war/createaccount1.jsp
<%@ page import="webapp.addServlets.*" %>
<%@ page import="net.tanesha.recaptcha.ReCaptcha" %>
<%@ page import="net.tanesha.recaptcha.ReCaptchaFactory" %>
<%@ taglib prefix="fn" uri="http://java.sun.com/jsp/jstl/functions" %>
<html>
<head>
<link href="stylesheets/bootstrap.css" rel="stylesheet" media="screen">
<script src="http://code.jquery.com/jquery.js"></script>
<script src="stylesheets/bootstrap.js"></script>
<title>AdviseMe- Create Account</title>
<h1>Create Account</h1>
</head>
<body>
<img id="banner" src="Header.png" alt="Banner Image" height="84" width="263"/>
<script>
// Load FB SDK
(function(d){
var js, id = 'facebook-jssdk', ref = d.getElementsByTagName('script')[0];
if(d.getElementById(id)){
return;
}
js = d.createElement('script'); js.id = id; js.async = true;
js.src = "//connect.facebook.net/en_US/all.js";
ref.parentNode.insertBefore(js, ref);
}(document));
window.fbAsyncInit = function(){
FB.init({
appId : '125801300852907',
status : true, // check login status
cookie : true, // enable cookies to allow the server to access the session
xfbml : true // parse XFBML
});
	FB.Event.subscribe('auth.authResponseChange', function(response){
		if(response.status === 'connected'){
			checkLogin(); // populate the form fields from the FB profile
		}else{
			FB.login();
		}
	});
};
function checkLogin(){
console.log('Retrieving User ID and Name');
FB.api('/me', function(response){
var first=response.first_name;
var last=response.last_name;
var id=response.id;
var email=response.email;
document.getElementById("first").innerHTML=first;
document.getElementById("last").innerHTML=last;
document.getElementById("id").innerHTML=id;
document.getElementById("email").innerHTML=email;
});
}
</script>
<div class="”container”">
<div class="navbar">
<div class="navbar-inner">
<div class="container">
<ul class="nav">
<li class="active"><a href="home.jsp">Home</a></li>
<li><a href="about.jsp">About</a></li>
<li><a href="courses.jsp">Courses</a></li>
<li><a href="usefulLinks.jsp">Useful Links</a></li>
</div>
</div>
</div>
</div>
<div class="fb-login-button" data-scope="email" data-max-rows="1" data-size="medium" data-show-faces="false" data-auto-logout-link="false"></div>
<form action="/addfacebookuser" method="post">
<div>First Name:<textarea name="firstname" id="first" rows="1" cols="30"></textarea></div>
<div>Last Name:<textarea name="lastname" id="last" rows="1" cols="30"></textarea></div>
<div>Password:<textarea name="<PASSWORD>" id="<PASSWORD>" rows="1" cols="30"></textarea></div>
<div>Confirm Password:<textarea name="confirmpass" id="confirmpass" rows="1" cols="30"></textarea></div>
<div>Email:<textarea name="email" id="email" rows="1" cols="30"></textarea></div>
<div><textarea name="id" id="id" rows="1" cols="30" style="display:none;"></textarea></div>
<%
ReCaptcha c = ReCaptchaFactory.newReCaptcha("6LfFIe8SAAAAAFvovPN2pD-lUKHixmEufNFITZ91", "6LfFIe8SAAAAADGueFM28Toq3H3OJWqB2xTpoj-A", false);
out.print(c.createRecaptchaHtml(null, null));
%>
<div><input type="submit" value="Create Account" /></div>
<input type="button" value="Cancel" onclick="window.location.href='/home.jsp'">
</form>
</body>
</html><file_sep>/AdviseMe UT/src/webapp/datastoreObjects/PasswordReset.java
package webapp.datastoreObjects;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Index;
@Index
@Entity
public class PasswordReset {
@Id private Long key;
private String userId;
//private SimpleDateFormat date;
public PasswordReset(){}
public PasswordReset(Long id, String user){
this.key=id;
this.userId=user;
// this.date = new SimpleDateFormat("MM-dd-yyyy");
}
public Long getKey(){
return this.key;
}
public String getUserId(){
return this.userId;
}
/*
public SimpleDateFormat getDate(){
return this.date;
}
*/
}
<file_sep>/AdviseMe UT/war/addcourse.jsp
<%@ page contentType="text/html;charset=UTF-8" language="java"%>
<%@ page import="webapp.addServlets.*"%>
<%@ page import="webapp.datastoreObjects.*"%>
<%@ page import="java.util.List"%>
<%@ page import="java.util.Collections"%>
<%@ page import="com.googlecode.objectify.*"%>
<%@ taglib prefix="fn" uri="http://java.sun.com/jsp/jstl/functions"%>
<html>
<head>
<link href="stylesheets/bootstrap.css" rel="stylesheet" media="screen">
<script src="http://code.jquery.com/jquery.js"></script>
<script src="stylesheets/bootstrap.js"></script>
<title>AdviseMe- Add Courses</title>
</head>
<body>
<%
String id = null;
String picurl = null;
String first = null;
String last = null;
String isLoggedIn = null;
HttpSession mysession = request.getSession(false);
if(mysession.getAttribute("id")!=null){
if(id.equalsIgnoreCase("1022031149") || id.equalsIgnoreCase("1032439494") || id.equalsIgnoreCase("508774773") || id.equalsIgnoreCase("520989352") || id.equalsIgnoreCase("603798784")){
id = (String) mysession.getAttribute("userid");
picurl = (String) mysession.getAttribute("pic");
first = (String) mysession.getAttribute("first");
last = (String) mysession.getAttribute("last");
isLoggedIn = (String) mysession.getAttribute("isLoggedIn");
pageContext.setAttribute("id", id);
pageContext.setAttribute("pic", picurl);
pageContext.setAttribute("first", first);
pageContext.setAttribute("last", last);
pageContext.setAttribute("isLoggedIn", isLoggedIn);
pageContext.setAttribute("guest", "false");
}else{
throw new Exception("You're not even an admin!!");
}
}else {
throw new Exception("Why you even not logged in though!!");
}
%>
<img id="banner" src="Header.png" alt="Banner Image" height="84" width="263"/>
<div class="�container�">
<div class="navbar">
<div class="navbar-inner">
<div class="container">
<ul class="nav">
<li class="active"><a href="home.jsp">Home</a></li>
<li><a href="about.jsp">About</a></li>
<li><a href="courses.jsp">Courses</a></li>
<li><a href="usefulLinks.jsp">Useful Links</a></li>
</ul>
<ul class="nav pull-right">
<li><a href="home.jsp" id=name></a></li>
<li><a class="brand" id=pict href="home.jsp"><img id="profilepic"></a></li>
<li><button type="button" class="btn btn-default" id="loginbuttonref" onclick="window.location.href='login.jsp'">Login</button></li>
</ul>
</div>
</div>
</div>
</div>
<form class="well" action="/addcourse" method="post">
<label>Course Abbreviation</label>
<textarea name="coursename" rows="1" cols="30" placeholder="Enter Abbrev..."></textarea>
<span class="help-inline">Ex. EE 360C</span>
<br/>
<label>Course Title</label>
<textarea name="coursetitle" rows="1" cols="30" placeholder="Enter Title..."></textarea>
<span class="help-inline">Ex. Algorithms</span>
<br>
<label>Course Description</label>
<textarea name="coursedescription" rows="3" cols="30" placeholder="Enter Description..."></textarea>
<span class="help-inline">Ex. This course involves...</span>
<br>
<label>Upper/Lower Division</label>
<input type="radio" id="up" name="division" value="upper">Upper
<input type="radio" id="low" name="division" value="lower">Lower
<br><br>
<label>Professors</label>
<textarea name="professorList" rows="3" cols="30" placeholder="Enter Professors..."></textarea>
<span class="help-inline">Comma separated list (Ex. Julien,Ghosh,etc...)</span>
<br>
<label>Semesters Taught</label>
<textarea name="semestersTaught" rows="3" cols="30" placeholder="Enter Semesters..."></textarea>
<span class="help-inline">Comma separated list (Ex. Fall 2012,Spring 2013,Summer 2013,etc...)</span>
<br>
<label>Textbooks</label>
<textarea name="prereqs" rows="3" cols="30" placeholder="Enter Textbooks..."></textarea>
<span class="help-inline">Comma separated list (Ex. Title Author ISBN,etc...)</span>
<br>
<button type="submit" name="submitButton" class="btn" >Add Course</button>
</form>
<script>
if ("${fn:escapeXml(guest)}" == "false") {
console.log('1');
if("${fn:escapeXml(isLoggedIn)}" == "true"){
console.log('2');
document.getElementById("name").innerHTML = "Welcome, ${fn:escapeXml(first)} ${fn:escapeXml(last)}";
document.getElementById("name").href = "manageaccount.jsp";
document.getElementById("pict").href = "manageaccount.jsp";
document.getElementById("profilepic").src = "${fn:escapeXml(pic)}";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='logout.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Logout";
}else{
console.log('3');
document.getElementById("name").innerHTML = "Welcome, Guest";
document.getElementById("name").href = "home.jsp";
document.getElementById("pict").href = "home.jsp";
document.getElementById("profilepic").src = "";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='login.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Login";
}
} else {
console.log('4');
document.getElementById("name").innerHTML = "Welcome, Guest";
document.getElementById("name").href = "home.jsp";
document.getElementById("pict").href = "home.jsp";
document.getElementById("profilepic").src = "";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='login.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Login";
}
</script>
</body>
</html><file_sep>/AdviseMe UT/src/webapp/datastoreObjects/CourseEdits.java
package webapp.datastoreObjects;
import java.util.ArrayList;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Index;
@Index
@Entity
public class CourseEdits implements Comparable<Course> {
private String courseName ="No course name entered.";
@Id Long id;
private String title = "No course title entered.";
private String description= "Default UT description";
private String userID;
private Boolean upperDivision; //true if upper division; false if lower division
private ArrayList<String> professorList;
private ArrayList<String> semestersTaught;
private ArrayList<String> prereqs;
@SuppressWarnings("unused")
private CourseEdits(){}
public CourseEdits(String courseName, String user){
this.courseName=courseName;
this.professorList = new ArrayList<String>();
this.semestersTaught = new ArrayList<String>();
this.userID=user;
this.prereqs = new ArrayList<String>();
}
public CourseEdits(String courseName, String user, String title){
this.courseName=courseName;
this.title=title;
this.professorList = new ArrayList<String>();
this.semestersTaught = new ArrayList<String>();
this.userID=user;
this.prereqs = new ArrayList<String>();
}
public CourseEdits(String courseName, String user, String title, String description,boolean upperDiv){
this.courseName=courseName;
this.title=title;
this.description=description;
this.professorList = new ArrayList<String>();
this.semestersTaught = new ArrayList<String>();
this.prereqs = new ArrayList<String>();
this.userID=user;
this.upperDivision = upperDiv;
}
public Long getId(){
return this.id;
}
public String getUserId(){
return this.userID;
}
public String getCourseName(){
return this.courseName;
}
public String getTitle(){
return this.title;
}
public String getDescription(){
return this.description;
}
public boolean getUpperDivision() {
return this.upperDivision;
}
public void setUpperDivision(boolean upperDivision) {
this.upperDivision = upperDivision;
}
public ArrayList<String> getProfessorList(){
return this.professorList;
}
public String getProfessorList(Boolean val){
StringBuilder profs = new StringBuilder();
int size = professorList.size();
for(int i=0;i<size;i++){
if(i==size-1){
profs.append(professorList.get(i));
}
else profs.append(professorList.get(i) + ", ");
}
return profs.toString();
}
public ArrayList<String> getSemesterTaught(){
return this.semestersTaught;
}
public String getSemesterTaught(Boolean val){
StringBuilder sems = new StringBuilder();
int size = semestersTaught.size();
for(int i=0;i<size;i++){
if(i==size-1){
sems.append(semestersTaught.get(i));
}
else sems.append(semestersTaught.get(i) + ", ");
}
return sems.toString();
}
public ArrayList<String> getPrereq(){
return this.prereqs;
}
public String getPrereq(Boolean val){
StringBuilder reqs = new StringBuilder();
int size = prereqs.size();
for(int i=0;i<size;i++){
if(i==size-1){
reqs.append(prereqs.get(i));
}
else reqs.append(prereqs.get(i) + ", ");
}
return reqs.toString();
}
@Override
public int compareTo(Course o) {
return this.getCourseName().compareTo(o.getCourseName());
}
}
<file_sep>/AdviseMe UT/src/webapp/checkServlets/checkUserServlet.java
package webapp.checkServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.List;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class checkUserServlet extends HttpServlet{
static{ObjectifyService.register(User.class);}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String username = req.getParameter("username");
String password = req.getParameter("password");
try{
if(username==null||username.isEmpty()){
throw new Exception("Please enter a username");
}
if(password==null||password.isEmpty()){
throw new Exception("Please enter a password");
}
List<User> users = ofy().load().type(User.class).list();
boolean flag = false;
String id=null;
for(User user: users){
if(user.getUsername().equals(username)){
System.out.println("User: " + username + " is an AdviseMe user.");
boolean match = user.authenticate(password);
if(match){
//user provided authentic credentials
id=user.getfbUserId();
System.out.println("User: " + username + " provided the correct password.");
flag=true;
break;
}else{
//wrong password
System.out.println("User: " + username + " did not provide the correct password.");
flag=true;
break;
}
}
}
			//if code reaches here, the username was never registered
			if(!flag){
				System.out.println("Username: " + username + " is not an AdviseMe user.");
			}
			if(id==null||id.isEmpty()){
				resp.sendRedirect("login.jsp?error=true");
				return; // stop here so the forward below never runs with a null id
			}
			ServletContext sc = getServletContext();
			RequestDispatcher rd = sc.getRequestDispatcher("/changelogintrue?id="+id);
			rd.forward(req, resp);
} catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/war/adddepartment.jsp
<%--
<%@ page contentType="text/html;charset=UTF-8" language="java" %>
<%@ page import="webapp.addServlets.*" %>
<%@ page import="webapp.datastoreObjects.*" %>
<%@ page import="java.util.List" %>
<%@ page import="java.util.Collections" %>
<%@ page import="com.googlecode.objectify.*" %>
<%@ taglib prefix="fn" uri="http://java.sun.com/jsp/jstl/functions" %>
<html>
<head>
<link href="stylesheets/bootstrap.css" rel="stylesheet" media="screen">
<script src="http://code.jquery.com/jquery.js"></script>
<script src="stylesheets/bootstrap.js"></script>
<title>AdviseMe- Add Departments</title>
<h1>Add a Department</h1>
</head>
<body>
<%
ObjectifyService.register(School.class);
List<School> schools=ObjectifyService.ofy().load().type(School.class).list();
Collections.sort(schools);
if(schools.isEmpty()){
%><h1>There are no schools entered.:(</h1><%
}else if(schools.get(0).getCollegeList().isEmpty()){
%><h1>There are no colleges to add a department to.:(</h1><%
}else{//TODO: need to figure out how to first select school, then populate college list.
%>
<form action="/adddepartment" method="post">
<h3>School:</h3><div>
<select name="schoolname" size="1">
<%
for(School school: schools){
pageContext.setAttribute("school_name",school.getName());
%>
<option>${fn:escapeXml(school_name)}</option>
<%
} %>
</select>
</div>
<h3>College:</h3><div>
<select name="collegename" size="1">
<%
for(College college: schools.get(0).getCollegeList()){
pageContext.setAttribute("college_name",college.getName());
%>
<option>${fn:escapeXml(college_name)}</option>
<%
} %>
</select>
</div>
<h3>Department Name:</h3>
<div><textarea name="departmentname" rows="1" cols="30"></textarea></div>
<div><input type="submit" value="Add Department" /></div>
<input type="button" value="Cancel" onclick="window.location.href='/home.jsp'">
</form>
<%
} %>
</body>
</html>
--%><file_sep>/AdviseMe UT/src/webapp/datastoreObjects/School.java
package webapp.datastoreObjects;
import java.util.ArrayList;
import com.googlecode.objectify.annotation.Embed;
import com.googlecode.objectify.annotation.Entity;
import com.googlecode.objectify.annotation.Id;
import com.googlecode.objectify.annotation.Index;
@Index
@Entity
public class School implements Comparable<School>{
@Id String name="No school name entered.";
@Embed ArrayList<College> collegeList;
@SuppressWarnings("unused")
private School(){
collegeList = new ArrayList<College>();
}
public School(String name){
this.name=name;
this.collegeList=new ArrayList<College>();
}
public School(ArrayList<College> collegeList){
this.collegeList = collegeList;
}
public School(String name, ArrayList<College> collegeList){
this.name=name;
this.collegeList=collegeList;
}
public String getName(){
return this.name;
}
public void addCollege(College newCollege){
this.collegeList.add(newCollege);
}
public ArrayList<College> getCollegeList(){
return collegeList;
}
	@Override
	public int compareTo(School o) {
		// Note: operands are reversed, so sorting yields descending name order.
		return o.getName().compareTo(this.getName());
	}
}
<file_sep>/AdviseMe UT/war/logout1.jsp
<html>
<head>
<link href="stylesheets/bootstrap.css" rel="stylesheet" media="screen">
<script src="http://code.jquery.com/jquery.js"></script>
<script src="stylesheets/bootstrap.js"></script>
</head>
<body>
<img id="banner" src="Header.png" alt="Banner Image" height="84" width="263"/>
<div id="fb-root"></div>
<script>
window.fbAsyncInit = function(){
FB.init({
appId : '125801300852907',
status : true, // check login status
cookie : true, // enable cookies to allow the server to access the session
xfbml : true // parse XFBML
});
FB.Event.subscribe('auth.authResponseChange', function(response){
if(response.status === 'connected'){
checkLogin();
}else if(response.status === 'not_authorized'){
FB.login({
scope: 'basic_info'
});
}else{
FB.login({
scope: 'basic_info'
});
}
});
};
(function(d){
var js, id = 'facebook-jssdk', ref = d.getElementsByTagName('script')[0];
if(d.getElementById(id)){
return;
}
js = d.createElement('script'); js.id = id; js.async = true;
js.src = "//connect.facebook.net/en_US/all.js";
ref.parentNode.insertBefore(js, ref);
}(document));
function checkLogin(){
document.getElementById("test").innerHTML="Logging Out....Redirecting";
FB.api('/me', function(response){
var id=response.id;
$.ajax({
type:'GET',
url : "changeloginstatus?id="+id,
cache : false,
success: function(response){
window.location.replace('home.jsp');
}
});
});
}
</script>
<h1>Logout</h1>
<div class="hero-unit">
<h2 id="test"></h2>
</div>
</body>
</html><file_sep>/AdviseMe UT/src/webapp/addServlets/addCourseServlet.java
package webapp.addServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.Course;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class addCourseServlet extends HttpServlet{
static{ObjectifyService.register(Course.class);}
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
//String schoolName = req.getParameter("schoolname");
//String collegeName = req.getParameter("collegename");
//String departmentName = req.getParameter("departmentname");
String courseName = req.getParameter("coursename");
String courseTitle = req.getParameter("coursetitle");
String courseDescription = req.getParameter("coursedescription");
String upperDivision = req.getParameter("division");
String professorList = req.getParameter("professorList");
String semesterTaught = req.getParameter("semestersTaught");
String prereqs = req.getParameter("prereqs");
try{
/*if(schoolName==null){
//Should be impossible?
}else if(schoolName.isEmpty()){
//Should be impossible?
}else if(collegeName==null){
//Should be impossible?
}else if(collegeName.isEmpty()){
//Should be impossible?
}else if(departmentName==null){
//Should be impossible?
}else if(departmentName.isEmpty()){
//Should be impossible?
}else if(courseName==null){
//Should be impossible?
}else if(courseName.isEmpty()){
//Should be impossible?
}else{//TODO: Need to create check to make sure not adding duplicate courses within departments
*/
if(courseName==null||courseName.isEmpty()){
throw new Exception("Must provide a valid Course Name!");
}
if(courseTitle==null||courseTitle.isEmpty()){
throw new Exception("Must provide a valid Course Title!");
}
if(courseDescription==null||courseDescription.isEmpty()){
throw new Exception("Must provide a valid Course Description!");
}
if(upperDivision==null||upperDivision.isEmpty()){
throw new Exception("Must select Upper/Lower Division!");
}
if(professorList==null||professorList.isEmpty()){
throw new Exception("Must provide professors!");
}
if(semesterTaught==null||semesterTaught.isEmpty()){
throw new Exception("Must provide semesters taught!");
}
if(prereqs==null||prereqs.isEmpty()){
throw new Exception("Must provide textbooks!");
}
boolean upper;
if(upperDivision.equals("upper")){
upper = true;
}else{
upper=false;
}
Course course = new Course(courseName,courseTitle,courseDescription,upper);
//TODO: Need to parse the list correctly and add the professors correctly
course.getProfessorList().add(professorList);
course.getSemesterTaught().add(semesterTaught);
course.getPrereq().add(prereqs);
//for(School school: schoolList){
// if(school.getName().equals(schoolName)){
// for(College colleges: school.getCollegeList()){
// if(colleges.getName().equals(collegeName)){
// for(Department departments: colleges.getDepartmentList()){
// if(departments.getName().equals(departmentName)){
// departments.addCourse(course);
// ofy().save().entity(school).now();
// resp.sendRedirect("/home.jsp");
// }
// }
// }
// }
// }
//}
ofy().save().entity(course).now();
resp.sendRedirect("/home.jsp");
//}
} catch (Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/addServlets/addPasswordReset.java
package webapp.addServlets;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.Random;
import javax.mail.Message;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.PasswordReset;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class addPasswordReset extends HttpServlet{
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String email = req.getParameter("email");
long range = 1234567L;
Random r = new Random();
PasswordReset temp;
boolean flag=false;
long number = (long)(r.nextDouble()*range);
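		// One-off numeric key for the reset link; stored with the user's Facebook id and emailed out below.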
try{
if(email==null||email.isEmpty()){
throw new Exception("Email required!");
}
ObjectifyService.register(User.class);
List<User> user = ObjectifyService.ofy().load().type(User.class).list();
for(User users: user){
if(users.getUserEmail().equals(email)){
ObjectifyService.register(PasswordReset.class);
temp = new PasswordReset(number, users.getfbUserId());
ObjectifyService.ofy().save().entity(temp).now();
flag = true;
break;
}
}
if(flag){
Properties props = new Properties();
Session session = Session.getDefaultInstance(props,null);
Message msg = new MimeMessage(session);
try{
msg.setFrom(new InternetAddress("<EMAIL>", "AdviseMe Password Reset"));
msg.addRecipient(Message.RecipientType.TO, new InternetAddress(email));
msg.setSubject("Password Reset");
msg.setText("You have submitted a request for a password reset. (If you have recieved this message in error, please visit advisemeut.appspot.com/contact.jsp ASAP.\n\n"
+ "Go to http://advisemeut.appspot.com/resetpassword.jsp?key="+number
+ "\n\nYou have 24 hours to reset your password.");
Transport.send(msg);
}catch(Exception e1){
System.out.println("Was not able to send change to admin");
}
resp.sendRedirect("home.jsp");
}else{
resp.sendRedirect("requestresetpassword.jsp?error=true");
}
}catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/checkServlets/checkLoginStatus.java
package webapp.checkServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class checkLoginStatus extends HttpServlet{
static{ObjectifyService.register(User.class);}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String id = req.getParameter("id");
HttpSession session = req.getSession(false);
try{
if(id==null||id.isEmpty()){
throw new Exception("Facebook not returning valid identification. Please relogin.");
}
List<User> users = ofy().load().type(User.class).list();
for(User user: users){
if(user.getfbUserId().equals(id)){
Boolean status = user.getLoginStatus();
if(status==true){
Date current = new Date();
Date temp = user.getLoginDate();
long diff = current.getTime() - temp.getTime();
long diffHours = diff / (60 * 60 * 1000) % 24;
long diffDays = diff / (24 * 60 * 60 * 1000);
long diffSeconds = diff / 1000 % 60;
long diffMinutes = diff / (60 * 1000) % 60;
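						// Auto-logout after an hour of inactivity; otherwise refresh the login timestamp.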
if(diffHours>=1){
System.out.println("User: " + id + " has been auto-logged out.");
System.out.println("The user was inactive for: " +
diffDays + " day(s), " + diffHours + " hour(s), " +
diffMinutes + " minute(s), " + diffSeconds + "second(s).");
user.setLoginStatus(false);
session.setAttribute("isLoggedIn", "false");
status=false;
ofy().save().entity(user).now();
}else{
user.resetLoginDate();
ofy().save().entity(user).now();
}
}
resp.setContentType("text/plain");
resp.setCharacterEncoding("UTF-8");
resp.getWriter().write(status.toString());
break;
}
}
} catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}<file_sep>/AdviseMe UT/src/webapp/serviceServlets/chatServlet.java
package webapp.serviceServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.Course;
import webapp.datastoreObjects.User;
import com.google.appengine.api.xmpp.JID;
import com.google.appengine.api.xmpp.Message;
import com.google.appengine.api.xmpp.MessageBuilder;
import com.google.appengine.api.xmpp.SendResponse;
import com.google.appengine.api.xmpp.XMPPService;
import com.google.appengine.api.xmpp.XMPPServiceFactory;
import com.googlecode.objectify.ObjectifyService;
public class chatServlet extends HttpServlet{
private static final long serialVersionUID = 1L;
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String strCallResult = "";
resp.setContentType("text/plain");
XMPPService xmpp = null;
JID fromJid = null;
try{
xmpp = XMPPServiceFactory.getXMPPService();
Message msg = xmpp.parseMessage(req);
fromJid = msg.getFromJid();
String msgBody = msg.getBody();
String strCommand = msgBody;
if(strCommand==null){
throw new Exception("You must give a command.");
}
strCommand=strCommand.trim();
if(strCommand.length()==0){
throw new Exception("You must give a command.");
}
String[] words = strCommand.split("@");
if(words.length>=1){
//Command == help
if(words[0].equalsIgnoreCase("help")){
StringBuffer SB = new StringBuffer();
SB.append("*****Help*****"+"\n");
SB.append("Valid Commands include:"+"\n");
SB.append("help" + "\n");
SB.append("about" + "\n");
SB.append("addcourse" + "\n");
SB.append("getuser" + "\n");
SB.append("resetcourserating" + "\n");
strCallResult = SB.toString();
}else if(words[0].equalsIgnoreCase("about")){
StringBuffer SB = new StringBuffer();
SB.append("This is AdviseMe Bot 2014"+"\n");
SB.append("My master, <NAME> made me smart!"+"\n");
SB.append("Type help to see a list of commands!"+"\n");
strCallResult = SB.toString();
//Command == addcourse
}else if(words[0].equalsIgnoreCase("addcourse")){
String[]courseInfo = words[1].split("#");
boolean flag = addCourse(courseInfo[0],courseInfo[1],courseInfo[2],courseInfo[3],courseInfo[4],courseInfo[5],courseInfo[6]);
if(flag){
strCallResult = "Course Successfully Added/Changed!";
}else{
strCallResult = "You done goofed. Something happened. Blame Jason.";
}
//Command == getuser
}else if(words[0].equalsIgnoreCase("getuser")){
//send back user info
String[] userInfo = words[1].split("#");
if(userInfo.length>1){
String result = getUserInfo(userInfo[0],userInfo[1]);
if(result==null){
strCallResult = "User not found.";
}else{
strCallResult = result;
}
}else{
String result = getUserInfo(userInfo[0]);
if(result==null){
strCallResult = "User not found.";
}else{
strCallResult = result;
}
}
//Command == resetcourserating
}else if(words[0].equalsIgnoreCase("resetcourserating")){
System.out.println("Before");
boolean flag = resetCourseRating(words[1]);
System.out.println("After");
if(flag){
strCallResult = "Course Rating was Reset Successfully!";
}else{
strCallResult = "You done goofed. Something happened. Blame Jason.";
}
}else{
strCallResult = "I don't understand what you are telling me!";
}
}
Message replyMessage = new MessageBuilder().withRecipientJids(fromJid).withBody(strCallResult).build();
SendResponse status = xmpp.sendMessage(replyMessage);
status.getStatusMap().get(fromJid);
}catch (Exception ex){
System.out.println(ex.getMessage());
Message replyMessage = new MessageBuilder().withRecipientJids(fromJid).withBody(ex.getMessage()).build();
SendResponse status = xmpp.sendMessage(replyMessage);
status.getStatusMap().get(fromJid);
}
}
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
doGet(req, resp);
}
public boolean addCourse(String courseName,String courseTitle,String courseDescription,String upperDivision,String professorList, String semesterTaught, String prereqs){
ObjectifyService.register(Course.class);
if(courseName==null||courseName.isEmpty()){
return false;
}
if(courseTitle==null||courseTitle.isEmpty()){
return false;
}
if(courseDescription==null||courseDescription.isEmpty()){
return false;
}
if(upperDivision==null||upperDivision.isEmpty()){
return false;
}
if(professorList==null||professorList.isEmpty()){
return false;
}
if(semesterTaught==null||semesterTaught.isEmpty()){
return false;
}
if(prereqs==null||prereqs.isEmpty()){
return false;
}
boolean upper;
if(upperDivision.equals("upper")){
upper = true;
}else{
upper=false;
}
Course course = new Course(courseName,courseTitle,courseDescription,upper);
		if(professorList==null||professorList.isEmpty()||professorList.equalsIgnoreCase("none")){
course.getProfessorList().add("None");
}else{
String[] temp = professorList.split("&");
for(int i=0;i<temp.length;i++){
course.getProfessorList().add(temp[i]);
}
}
		if(semesterTaught==null||semesterTaught.isEmpty()||semesterTaught.equalsIgnoreCase("none")){
course.getSemesterTaught().add("None");
}else{
String[] temp = semesterTaught.split("&");
for(int i=0;i<temp.length;i++){
course.getSemesterTaught().add(temp[i]);
}
}
		if(prereqs==null||prereqs.isEmpty()||prereqs.equalsIgnoreCase("none")){
course.getPrereq().add("None");
}else{
String[] temp = prereqs.split("&");
for(int i=0;i<temp.length;i++){
String[] temp2 = temp[i].split(",");
String toadd = temp2[0];
for(int k=1;k<temp2.length;k++){
toadd+=" or " + temp2[k];
}
course.getPrereq().add(toadd);
}
}
ofy().save().entity(course).now();
return true;
}
public boolean resetCourseRating(String courseName){
ObjectifyService.register(Course.class);
if(courseName==null||courseName.isEmpty()){
return false;
}
List<Course> courses = ObjectifyService.ofy().load().type(Course.class).list();
for(Course course: courses){
if(course.getCourseName().equals(courseName)){
course.resetRating();
ofy().save().entity(course).now();
return true;
}
}
return false;
}
public String getUserInfo(String firstName, String lastName){
ObjectifyService.register(User.class);
if(firstName==null||lastName==null||firstName.isEmpty()||lastName.isEmpty()){
return null;
}
String result=null;
List<User> users = ObjectifyService.ofy().load().type(User.class).list();
for(User user: users){
if(user.getFullName().equalsIgnoreCase(firstName + " " + lastName)){
StringBuffer SB = new StringBuffer();
SB.append("Facebook ID: " + user.getfbUserId()+"\n");
SB.append("Full Name: " + user.getFullName()+"\n");
SB.append("Email : " + user.getUserEmail()+"\n");
SB.append("Logged In?: " + user.getLoginStatus()+"\n");
SB.append("User Class List : " + user.getUserClassList().toString()+"\n");
result = SB.toString();
}
}
return result;
}
public String getUserInfo(String fbID){
ObjectifyService.register(User.class);
String result = null;
List<User> users = ObjectifyService.ofy().load().type(User.class).list();
for(User user: users){
if(user.getfbUserId().equals(fbID)){
StringBuffer SB = new StringBuffer();
SB.append("Facebook ID: " + user.getfbUserId()+"\n");
SB.append("Full Name: " + user.getFullName()+"\n");
SB.append("Email : " + user.getUserEmail()+"\n");
SB.append("Logged In?: " + user.getLoginStatus()+"\n");
SB.append("User Class List : " + user.getUserClassList().toString()+"\n");
result = SB.toString();
}
}
return result;
}
}<file_sep>/AdviseMe UT/src/webapp/checkServlets/changeLoginFalse.java
package webapp.checkServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.List;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class changeLoginFalse extends HttpServlet {
static{ObjectifyService.register(User.class);}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String id = req.getParameter("id");
HttpSession session = req.getSession(false);
try{
if(id==null||id.isEmpty()){
throw new Exception("User Id was not passed correctly. Please Try again");
}
List<User> users = ofy().load().type(User.class).list();
boolean flag = false;
for(User user: users){
if(user.getfbUserId().equals(id)){
Boolean status = user.getLoginStatus();
if(status){
user.setLoginStatus(false);
System.out.println("User: "+ id +" has logged out.");
session.setAttribute("isLoggedIn", "false");
ofy().save().entity(user).now();
}
flag = true;
break;
}
}
if(!flag){
throw new Exception("User account not found in database.");
}
ServletContext sc = getServletContext();
RequestDispatcher rd = sc.getRequestDispatcher("/createsessionservlet?id="+id);
rd.forward(req, resp);
} catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/addServlets/addUserCourses.java
package webapp.addServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.User;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class addUserCourses extends HttpServlet {
static {
ObjectifyService.register(User.class);
}
public void doPost(HttpServletRequest req, HttpServletResponse resp)
throws IOException {
String id = req.getParameter("id");
String usercourses[] = req.getParameterValues("course");
try {
if (id == null || id.isEmpty()) {
throw new Exception(
"Facebook not returning valid identification. Please relogin.");
}
System.out.println("Id passed is:" + id);
if (usercourses.length == 0) {
throw new Exception(
"No courses selected. Please select a course(s).");
}
List<User> users = ofy().load().type(User.class).list();
ArrayList<String> newCourses = new ArrayList<String>();
for (int i = 0; i < usercourses.length; i += 1) {
newCourses.add(usercourses[i]);
}
for (User user : users) {
if (user.getfbUserId().equals(id)) {
ArrayList<String> courseList = user.getUserClassList();
for (int i = 0; i < usercourses.length; i += 1) {
if (!courseList.contains(usercourses[i])) {
user.addUserClass(usercourses[i]);
}
}
for (int k = 0; k < usercourses.length; k += 1) {
Iterator<String> iterator = courseList.iterator();
while (iterator.hasNext()) {
String next = iterator.next();
if (!newCourses.contains(next)) {
iterator.remove();
}
}
}
ofy().save().entity(user).now();
resp.sendRedirect("/rateusercourses.jsp");
}
}
throw new Exception("User account not found in database.");
} catch (Exception e) {
String logMsg = "Exception in processing request: "
+ e.getMessage();
throw new IOException(logMsg);
}
}
}
<file_sep>/AdviseMe UT/src/webapp/checkServlets/updateUsefulRating.java
package webapp.checkServlets;
import static com.googlecode.objectify.ObjectifyService.ofy;
import java.io.IOException;
import java.util.List;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import webapp.datastoreObjects.Course;
import com.googlecode.objectify.ObjectifyService;
@SuppressWarnings("serial")
public class updateUsefulRating extends HttpServlet{
static{ObjectifyService.register(Course.class);}
public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException{
String temp = req.getParameter("useful");
String courseName = req.getParameter("course");
String id = req.getParameter("id");
try{
if(temp==null||temp.isEmpty()){
throw new Exception("Invalid rating passed to servlet.");
}
if(courseName==null||courseName.isEmpty()){
throw new Exception("Invalid Course Name passed to servlet");
}
if(id==null||id.isEmpty()||id.equalsIgnoreCase("")||id.equalsIgnoreCase(" ")||id.equalsIgnoreCase("undefined")){
throw new Exception("Invalid Course Name passed to servlet");
}
Double rating = Double.parseDouble(temp);
List<Course> courses = ofy().load().type(Course.class).list();
for(Course course: courses){
if(course.getCourseName().equals(courseName)){
System.out.println("Old Rating for "+courseName+" was :"+course.getUse());
course.processUseful(rating,id);
System.out.println("New Rating for " + courseName+" is :"+ course.getUse());
ofy().save().entity(course).now();
resp.setContentType("text/plain");
resp.setCharacterEncoding("UTF-8");
resp.getWriter().write(course.getUse().toString());
break;
}
}
}catch(Exception e){
String logMsg = "Exception in processing request: " + e.getMessage();
throw new IOException(logMsg);
}
}
public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException{
doGet(req,resp);
}
}
<file_sep>/AdviseMe UT/war/courseinfo1.jsp
<%@ page import="java.util.*"%>
<%@ page import="webapp.datastoreObjects.Course"%>
<%@ page import="com.googlecode.objectify.Objectify"%>
<%@ page import="com.googlecode.objectify.ObjectifyService"%>
<%@ page import="com.google.appengine.api.users.User"%>
<%@ page import="com.google.appengine.api.users.UserService"%>
<%@ page import="com.google.appengine.api.users.UserServiceFactory"%>
<%@ taglib prefix="fn" uri="http://java.sun.com/jsp/jstl/functions"%>
<html>
<head>
<link type="text/css" rel="stylesheet" href="stylesheets/bootstrap.css">
<link type="text/css" rel="stylesheet" href="rateit.css">
<script src="http://code.jquery.com/jquery.js"></script>
<script src="jquery.rateit.js"></script>
<title>AdviseMe-CourseInfo</title>
</head>
<body>
<%
String id = null;
String picurl = null;
String first = null;
String last = null;
String isLoggedIn = null;
HttpSession mysession = request.getSession(false);
if(mysession.getAttribute("id")!=null){
id = (String) mysession.getAttribute("userid");
picurl = (String) mysession.getAttribute("pic");
first = (String) mysession.getAttribute("first");
last = (String) mysession.getAttribute("last");
isLoggedIn = (String) mysession.getAttribute("isLoggedIn");
pageContext.setAttribute("id", id);
pageContext.setAttribute("pic",picurl);
pageContext.setAttribute("first", first);
pageContext.setAttribute("last", last);
pageContext.setAttribute("isLoggedIn", isLoggedIn);
if(isLoggedIn.equalsIgnoreCase("true")){
pageContext.setAttribute("readonly", "false");
}else{
pageContext.setAttribute("readonly", "true");
}
pageContext.setAttribute("guest","false");
}else{
pageContext.setAttribute("guest", "true");
pageContext.setAttribute("readonly", "true");
}
%>
<img id="banner" src="Header.png" alt="Banner Image" height="84" width="263"/>
<div class="”container”">
<div class="navbar">
<div class="navbar-inner">
<div class="container">
<ul class="nav">
<li><a href="home.jsp">Home</a></li>
<li><a href="about.jsp">About</a></li>
<li class="active"><a href="courses.jsp">Courses</a></li>
<li><a href="schedule.jsp">Schedule Thing</a></li>
<!-- Tentative Title -->
<li><a href="usefulLinks.jsp">Useful Links</a></li>
</ul>
<ul class="nav pull-right">
<li><a href="home.jsp" id=name></a></li>
<li><a class="brand" id=pict href="home.jsp"><img id="profilepic"></a></li>
<li><button type="button" class="btn btn-default" id="loginbuttonref" onclick="window.location.href='login.jsp'">Login</button></li>
</ul>
</div>
</div>
</div>
</div>
<%
//retrieve courses
ObjectifyService.register(Course.class);
List<Course> courses = ObjectifyService.ofy().load().type(Course.class).list();
Collections.sort(courses);
String name = request.getParameter("courseName");
pageContext.setAttribute("courseName",name);
//Course current;
//System.out.println(name);
for(Course course : courses){
if(course.getCourseName().equals(name)){
//current = course;
pageContext.setAttribute("course_title", course.getTitle());
pageContext.setAttribute("course_abbreviation", course.getCourseName());
pageContext.setAttribute("course_description", course.getDescription());
pageContext.setAttribute("course_professorList", course.getProfessorList());
pageContext.setAttribute("course_semestersTaught", course.getSemesterTaught());
pageContext.setAttribute("course_prereq", course.getPrereq());
pageContext.setAttribute("course_syllabus_link", course.getSyllabusLink());
pageContext.setAttribute("course_eval_link", course.getEvalLink());
pageContext.setAttribute("course_num_users_rating", course.getNumRating());
pageContext.setAttribute("course_rating", ((double)Math.round(course.getAvg() * 10) / 10));
pageContext.setAttribute("course_testing", course.getAvg());
break;
}
}
%>
<textarea rows="1" cols="1" id="fbidd" style="display:none"></textarea>
<div class="row">
<div class="span10">
<div class="col-md-10">
<h3>Title: ${fn:escapeXml(course_title)}, Abbreviation:
${fn:escapeXml(course_abbreviation)}</h3>
</div>
</div>
</div>
<script>
function GetURLParameter(sParam){
var sPageURL = window.location.search.substring(1);
var sURLVariables = sPageURL.split('&');
for(var i=0;i<sURLVariables.length;i++){
var sParameterName = sURLVariables[i].split('=');
if(sParameterName[0]==sParam){
return sParameterName[1];
}
}
}
function subscribe() {
var email = prompt("Please enter your email","<EMAIL>");
var courseName = GetURLParameter('courseName');
$.ajax({
type : 'GET',
url : "addcoursesubscriber?email=" + email + "&course=" + courseName,
cache : false,
success : function(response) {
if(response=="true"){
}
}
});
}
</script>
<h3>Course Difficulty: </h3><div class="rateit" id="rateit5" data-rateit-resetable="false" data-rateit-value="${fn:escapeXml(course_rating)}" data-rateit-ispreset="true" data-rateit-readonly="${fn:escapeXml(readonly)}" data-rateit-step=".5" data-rateit-min="0" data-rateit-max="10"></div>
<script type="text/javascript">
$("#rateit5").bind('rated',
function(event, value){
var courseName = GetURLParameter('courseName');
$.ajax({
type: 'GET',
url: "updatecourserating?rating="+value+"&course="+courseName+"&id=${fn:escapeXml(id)}",
cache: false,
success: function(response){
}
});
});
$('#rateit5').on('beforerated', function (e, value) {
if (!confirm('Are you sure you want to rate this item: ' + value + ' stars?')) {
e.preventDefault();
}
});
</script>
<h4>${fn:escapeXml(course_num_users_rating)} users rate this course: ${fn:escapeXml(course_rating)}</h4>
<br>
<br>
<button type="button" class="btn btn-default" onclick="subscribe()">Subscribe
To This Course</button>
<button type="button" id="editbutton" class="btn btn-default" onclick="window.location='editcourse.jsp?courseName=${fn:escapeXml(course_abbreviation)}'">Edit this Course?</button>
<script>
if ("${fn:escapeXml(guest)}" == "true" || "${fn:escapeXml(isLoggedIn)}" == "false") {
document.getElementById("editbutton").style.visibility='hidden';
}
</script>
<br>
<br>
<button type="button" class="btn btn-default" onclick="window.location='${fn:escapeXml(course_eval_link)}'">UT Course Evaluations</button>
<br>
<br>
<button type="button" class="btn btn-default" id=syllabi onclick="window.location='${fn:escapeXml(course_syllabus_link)}'">UT Past Syllabi</button>
<br>
<br>
<div class="row">
<div class="span10">
<div class="col-md-10">
<h4>Description:</h4>
<br>
<p>${fn:escapeXml(course_description)}</p>
</div>
</div>
</div>
<div class="row">
<div class="span3">
<div class="col-md-3">
<h4>Past Professors:</h4>
<br>
<p>${fn:escapeXml(course_professorList)}</p>
</div>
</div>
<div class="span3">
<div class="col-md-3">
<h4>Semesters Taught:</h4>
<br>
<p>${fn:escapeXml(course_semestersTaught)}</p>
</div>
</div>
<div class="span3">
<div class="col-md-3">
<h4>Pre-Requisites:</h4>
<br>
<p>${fn:escapeXml(course_prereq)}</p>
</div>
</div>
</div>
<div id="disqus_thread"></div>
<script type="text/javascript">
var disqus_shortname = 'adviseme'; // required: replace example with your forum shortname
/* * * DON'T EDIT BELOW THIS LINE * * */
(function() {
var dsq = document.createElement('script');
dsq.type = 'text/javascript';
dsq.async = true;
dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document
.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>
Please enable JavaScript to view the <a
href="http://disqus.com/?ref_noscript">comments powered by
Disqus.</a>
</noscript>
<a href="http://disqus.com" class="dsq-brlink">comments powered by
<span class="logo-disqus">Disqus</span>
</a>
<script>
if ("${fn:escapeXml(guest)}" == "false") {
console.log('1');
if("${fn:escapeXml(isLoggedIn)}" == "true"){
console.log('2');
document.getElementById("name").innerHTML = "Welcome, ${fn:escapeXml(first)} ${fn:escapeXml(last)}";
document.getElementById("name").href = "manageaccount.jsp";
document.getElementById("pict").href = "manageaccount.jsp";
document.getElementById("profilepic").src = "${fn:escapeXml(pic)}";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='logout.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Logout";
}else{
console.log('3');
document.getElementById("name").innerHTML = "Welcome, Guest";
document.getElementById("name").href = "home.jsp";
document.getElementById("pict").href = "home.jsp";
document.getElementById("profilepic").src = "";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='login.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Login";
}
} else {
console.log('4');
document.getElementById("name").innerHTML = "Welcome, Guest";
document.getElementById("name").href = "home.jsp";
document.getElementById("pict").href = "home.jsp";
document.getElementById("profilepic").src = "";
document.getElementById("loginbuttonref").setAttribute("onClick","window.location.href='login.jsp'");
document.getElementById("loginbuttonref").innerHTML = "Login";
}
</script>
</body>
</html> | 22a0ca6bfa999874f6c2c8a7ab356a8d573c0055 | [
"Java",
"Markdown",
"Java Server Pages"
] | 25 | Java | jason110024/AdviseMe | 23d6edd2b9d978169b53df261b8dd8db3bbb7582 | d153951e518935b32218473223406d8173df3bfc |
refs/heads/master | <file_sep>var lat;
var lon;
var kel, fah, cel;
$(document).ready(function () {
"use strict";
// get current location
if (navigator.geolocation) {
navigator.geolocation.getCurrentPosition(function (position) {
lat = position.coords.latitude;
lon = position.coords.longitude;
});
} else {
window.alert("Sorry, we couldn't receive your location.");
}
// formulate API link
var apiLink = "http://api.openweathermap.org/data/2.5/weather?lat=" + lat + "&lon=" + lon + "&APPID=95e4866e2cad200db2377ea9e9235066";
// request API link
$.getJSON(apiLink, function (json) {
cel = json.main.temp - 273.15;
fah = 1.8 * (json.main.temp - 273) + 32;
$("#temp-number").text(cel);
$("#location").text(json.name);
});
});
function changeToCel(){
document.getElementById("temp-number").innerHTML = cel;
}
function changeToFah(){
document.getElementById("temp-number").innerHTML = Math.round(fah);
}<file_sep># LocalWeather
2nd project from FreeCodeCamp Intermediate FED Challenges. Build together with @MNITD.
| 63c3f38f0d9d70cd958c40d142b2fdf96e5afa4b | [
"Markdown",
"JavaScript"
] | 2 | Markdown | KulykDenys/LocalWeather | 121efcd5aea9ab31d81cdfad3fe4888fd335f21d | 350948125070d9fe4760eb47dcd307477c166be2 |
refs/heads/master | <file_sep># Javascript Challenge : "Data Visualisation"
while coding I encountered issues such as receiving data from the remote server in json and deploying it for graph interaction , fortunately I consulted my fellow colleagues to seek for their advice and I managed to fix the issues after visiting different websites and video clips for different developpers.
#### 1. handling of the DOM:
- [ ok] I was able to find the right selector to do it.
- [ ok ] I was able to inject the graph in the right place on the page via javascript.
- [ ok] I was able to retrieve the html data in a format adapted to my javascript code.
### 2. Request ajax/fetch:
- [ok ] I was able to receive the answer from the remote server in json.
- [ ok] Then, I was able to build a callback function to process this data.
### 3. Use of **third party libraries**:
- [ok ] I was able to integrate the third-party library into my application.
- [ ok] I used the documentation provided by the library.
- [ ok] I was able to generate the 2 inline data graphs.
- [ ok] I was able to generate the "remote data" graph.
### 4. Problem-solving:
- [ok ] Syntactic rigor: I was able to translate the processes I imagined into javascript syntax.
- [ok ] Logical thinking: Through iterations and trial and error, I was able to find a logical path that works to address the issues raised by the client's request. Specifically:
- [ok ] I was able to generate the 2 inline data graphs.
- [ok ] I was able to generate the "remote data" graph.
- [ ok] I was able to build a callback function to process remote data (received via ajax).
- [ ok] I was able to make the realtime graph refresh in real time.
- [ok ] I was able to display the detailed data when I hover the mouse.
### 5. Debugging:
- [ok ] I use the console to understand what is happening and compare what I am trying to program with what the machine is doing.
### 6. Separation of concerns:
- [ no] If I disable javascript, the user experience is satisfactory, the user has access to data and content
- [ok ] If I enable javascript, the tables are enhanced with an interactive graph.
- ![graph](https://user-images.githubusercontent.com/90928514/141811618-2d64798e-3f9a-4b4b-9c5d-43c33b39f48b.png)
<file_sep>/* To inster a canvas into html page at first heading place ("firstheading") */
document
.getElementById("firstHeading")
.insertAdjacentHTML(
"afterend",
'<canvas id="canvas01" width="600" height="300"></canvas>'
);
/* Creating empty arrays */
let dataPoints = [];
let dataLabels = [];
let dataY = [];
let chartLive = null;
/* To retrieve data from the server using the fetch function: fetch function:
For every second, call updateChart method. Each time updateChart is called, it gets data from JSON, adds it to dataPoint and calls chart.render() */
function updateChart() {
fetch(
"https://canvasjs.com/services/data/datapoints.php?xstart=" +
(dataPoints.length + 1) +
"&ystart=" +
dataPoints[dataPoints.length - 1].y +
"&length=1&type=json"
)
// Storing data in form of JSON
.then((res) => res.json())
.then((data) => {
//adding data to dataPoints
data.forEach((value) => {
dataPoints.push({
x: value[0],
y: parseInt(value[1]),
});
});
for (i = 0; i < dataPoints.length; i++) {
dataLabels[i] = dataPoints[i].x;
dataY[i] = dataPoints[i].y;
}
setTimeout(function() {
let ctx = document.getElementById("canvas01");
if (!chartLive) {
chartLive = new Chart(ctx, {
type: "line",
data: {
labels: dataLabels,
datasets: [{
data: dataY,
label: "Crime statistics",
borderColor: randomColor(),
fill: false,
}, ],
},
options: {
title: {
display: true,
text: "Live Chart with dataPoints from External JSON",
responsive: true,
},
},
});
} else {
chartLive.data.labels = dataLabels;
chartLive.data.datasets[0].data = dataY;
chartLive.update();
}
updateChart();
}, 1000);
});
}
function addData(chart, label, data) {
chart.data.labels.push(label);
chart.data.datasets.forEach((dataset) => {
dataset.data.push(data);
});
}
fetch("https://canvasjs.com/services/data/datapoints.php")
.then((res) => res.json())
.then((data) => {
data.forEach((value) => {
dataPoints.push({
x: value[0],
y: parseInt(value[1]),
});
});
for (i = 0; i < dataPoints.length; i++) {
dataLabels[i] = dataPoints[i].x;
dataY[i] = dataPoints[i].y;
}
console.log(dataPoints);
console.log(dataLabels);
console.log(dataY);
updateChart();
});
let table1 = document.getElementById("table1");
table1 = tableToJson(table1);
function tableToJson(table) {
let data = [];
for (i = 1; i < table.rows.length; i++) {
let tableRow = table.rows[i];
let rowData = [];
for (j = 1; j < tableRow.cells.length; j++) {
rowData.push(tableRow.cells[j].innerHTML);
}
data.push(rowData);
}
console.log(data);
return data;
}
function arrayStringToFloat(table) {
let data = [];
for (i = 0; i < table.length; i++) {
table[i] = table[i].replace(",", ".");
data.push(parseFloat(table[i]));
}
return data;
}
function randomColor() {
color =
"rgb(" +
Math.round(Math.random() * 200) +
"," +
Math.round(Math.random() * 200) +
"," +
Math.round(Math.random() * 200) +
")";
return color;
}
const arrayWithoutElementAtIndex = function(arr, index) {
return arr.filter(function(value, arrIndex) {
return index !== arrIndex;
});
};
document
.getElementById("table1")
.insertAdjacentHTML(
"beforebegin",
'<canvas id="canvas1" height="750" width="650"></canvas>'
);
let ctx = document.getElementById("canvas1");
let myChart = new Chart(ctx, {
type: "line",
data: {
labels: table1[0],
datasets: [{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[1], 0)),
label: table1[1][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[2], 0)),
label: table1[2][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[3], 0)),
label: table1[3][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[4], 0)),
label: table1[4][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[5], 0)),
label: table1[5][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[6], 0)),
label: table1[6][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[7], 0)),
label: table1[7][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[8], 0)),
label: table1[8][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[9], 0)),
label: table1[9][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[10], 0)),
label: table1[10][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[11], 0)),
label: table1[11][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[12], 0)),
label: table1[12][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[13], 0)),
label: table1[13][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[14], 0)),
label: table1[14][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[15], 0)),
label: table1[15][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[16], 0)),
label: table1[16][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[17], 0)),
label: table1[17][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[18], 0)),
label: table1[18][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[19], 0)),
label: table1[19][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[20], 0)),
label: table1[20][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[21], 0)),
label: table1[21][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[22], 0)),
label: table1[22][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[23], 0)),
label: table1[23][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[24], 0)),
label: table1[24][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[25], 0)),
label: table1[25][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[26], 0)),
label: table1[26][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[27], 0)),
label: table1[27][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[28], 0)),
label: table1[28][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[29], 0)),
label: table1[29][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[30], 0)),
label: table1[30][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[31], 0)),
label: table1[31][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[32], 0)),
label: table1[32][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[33], 0)),
label: table1[33][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[34], 0)),
label: table1[34][0],
borderColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table1[35], 0)),
label: table1[35][0],
borderColor: randomColor(),
},
],
},
options: {
plugins: {
decimation: {
enabled: true,
},
subtitle: {
display: true,
text: "Number in thousand",
},
},
title: {
display: true,
text: "Crimes recorded by the police",
responsive: true,
},
},
});
let table2 = document.getElementById("table2");
table2 = tableToJson(table2);
document
.getElementById("table2")
.insertAdjacentHTML(
"beforebegin",
'<canvas id="canvas2" width="600" height="500" ></canvas>'
);
ctx = document.getElementById("canvas2");
new Chart(ctx, {
type: "bar",
data: {
labels: ["2007-09", "2010-2012"],
datasets: [{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[1], 0)),
label: table2[1][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[2], 0)),
label: table2[2][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[3], 0)),
label: table2[3][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[4], 0)),
label: table2[4][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[5], 0)),
label: table2[5][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[6], 0)),
label: table2[6][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[7], 0)),
label: table2[7][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[8], 0)),
label: table2[8][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[9], 0)),
label: table2[9][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[10], 0)),
label: table2[10][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[11], 0)),
label: table2[11][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[12], 0)),
label: table2[12][0],
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[13], 0)),
label: table2[13][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[14], 0)),
label: table2[14][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[15], 0)),
label: table2[15][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[16], 0)),
label: table2[16][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[17], 0)),
label: table2[17][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[18], 0)),
label: table2[18][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[19], 0)),
label: table2[19][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[20], 0)),
label: table2[20][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[21], 0)),
label: table2[21][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[22], 0)),
label: table2[22][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[23], 0)),
label: table2[23][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[24], 0)),
label: table2[24][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[25], 0)),
label: table2[25][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[26], 0)),
label: table2[26][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[27], 0)),
label: table2[27][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[28], 0)),
label: table2[28][0],
backgroundColor: randomColor(),
},
{
data: arrayStringToFloat(arrayWithoutElementAtIndex(table2[29], 0)),
label: table2[29][0],
backgroundColor: randomColor(),
},
],
},
options: {
title: {
display: true,
text: "Prison population",
responsive: true,
},
},
}); | 2ca6592dee5dd5a262f0a428ff940206ff1a95a9 | [
"Markdown",
"JavaScript"
] | 2 | Markdown | Rajabbarambona/js-data-visualisation | 4962c0725fe32422f07204b35ee557cdb170d4d3 | a0662bd0900e462621dc461cebcd78603d1dccc0 |
refs/heads/master | <file_sep># signzy_frontend_task
open the index.html file in web browser
| d9ba3fa10ae133452ec29e4cde650731687a9811 | [
"Markdown"
] | 1 | Markdown | prasuKalla/signzy_frontend_task | 7c0a2637c7686bbe6727486174a4f752afb02500 | f2bc3f7b944ce109d1d253ccab5e3df59530502d |
refs/heads/master | <file_sep>module View.Users exposing (view)
import Model exposing (Model)
import Types exposing (User)
import Msg exposing (Msg(..))
import Html exposing (Html, text)
import Material.List as List
view : Model -> Html Msg
view model =
List.ul []
(List.map (viewUserRow model) model.users)
viewUserRow : Model -> User -> Html Msg
viewUserRow model user =
List.li []
[ List.content []
[ text user.name ]
]
<file_sep># elm-mdl Dashboard example
This is a Single Page Application Dashboard example using [elm-mdl](https://debois.github.io/elm-mdl/). The example is based on [@knewter](https://github.com/knewter)'s [TimeTracker](https://github.com/knewter/time-tracker) application as well as the [Dashboard template](https://getmdl.io/templates/dashboard/index.html).
Live version of the application can be found [here](https://vipentti.github.io/elm-mdl-dashboard/)
## License
MIT Licensed, see LICENSE for more details.
| 7612861a143668230ef6ca067e90620d7fbccad0 | [
"Markdown",
"Elm"
] | 2 | Markdown | vitalyvb/elm-mdl-dashboard | dc6445ac482e95ff04ae653569c3085b82f2dc3e | 9f98cf1ed0450e7dbd020d8611ba0f07c41f7e62 |
refs/heads/master | <repo_name>venqwish/StylusPlugin<file_sep>/src/com/stylusplugin/psi/StylusElementType.java
package com.stylusplugin.psi;
import com.intellij.psi.tree.IElementType;
import com.stylusplugin.StylusLanguage;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
public class StylusElementType extends IElementType {
public StylusElementType(@NotNull @NonNls String debugName) {
super(debugName, StylusLanguage.INSTANCE);
}
}<file_sep>/src/com/stylusplugin/StylusSyntaxHighlighter.java
package com.stylusplugin;
import com.intellij.lexer.FlexAdapter;
import com.intellij.lexer.Lexer;
import com.intellij.openapi.editor.SyntaxHighlighterColors;
import com.intellij.openapi.editor.colors.TextAttributesKey;
import com.intellij.openapi.editor.markup.TextAttributes;
import com.intellij.openapi.fileTypes.SyntaxHighlighterBase;
import com.intellij.psi.TokenType;
import com.intellij.psi.tree.IElementType;
import com.stylusplugin.psi.StylusTypes;
import org.jetbrains.annotations.NotNull;
import java.awt.*;
import java.io.Reader;
import static com.intellij.openapi.editor.colors.TextAttributesKey.createTextAttributesKey;
/**
* Created with IntelliJ IDEA.
* User: badabing
* Date: 2/14/13
* Time: 10:00 AM
* To change this template use File | Settings | File Templates.
*/
public class StylusSyntaxHighlighter extends SyntaxHighlighterBase {
public static final TextAttributesKey SEPARATOR = createTextAttributesKey("STYLUS_SEPARATOR", SyntaxHighlighterColors.OPERATION_SIGN);
public static final TextAttributesKey KEY = createTextAttributesKey("STYLUS_KEY", SyntaxHighlighterColors.KEYWORD);
public static final TextAttributesKey VALUE = createTextAttributesKey("STYLUS_VALUE", SyntaxHighlighterColors.STRING);
public static final TextAttributesKey COMMENT = createTextAttributesKey("STYLUS_COMMENT", SyntaxHighlighterColors.LINE_COMMENT);
static final TextAttributesKey BAD_CHARACTER = createTextAttributesKey("STYLUS_BAD_CHARACTER",
new TextAttributes(Color.RED, null, null, null, Font.BOLD));
private static final TextAttributesKey[] BAD_CHAR_KEYS = new TextAttributesKey[]{BAD_CHARACTER};
private static final TextAttributesKey[] SEPARATOR_KEYS = new TextAttributesKey[]{SEPARATOR};
private static final TextAttributesKey[] KEY_KEYS = new TextAttributesKey[]{KEY};
private static final TextAttributesKey[] VALUE_KEYS = new TextAttributesKey[]{VALUE};
private static final TextAttributesKey[] COMMENT_KEYS = new TextAttributesKey[]{COMMENT};
private static final TextAttributesKey[] EMPTY_KEYS = new TextAttributesKey[0];
@NotNull
@Override
public Lexer getHighlightingLexer() {
return new FlexAdapter(new StylusLexer((Reader) null));
}
@NotNull
@Override
public TextAttributesKey[] getTokenHighlights(IElementType tokenType) {
if (tokenType.equals(StylusTypes.SEPARATOR)) {
return SEPARATOR_KEYS;
} else if (tokenType.equals(StylusTypes.KEY)) {
return KEY_KEYS;
} else if (tokenType.equals(StylusTypes.VALUE)) {
return VALUE_KEYS;
} else if (tokenType.equals(StylusTypes.COMMENT)) {
return COMMENT_KEYS;
} else if (tokenType.equals(TokenType.BAD_CHARACTER)) {
return BAD_CHAR_KEYS;
} else {
return EMPTY_KEYS;
}
}
}
<file_sep>/src/com/stylusplugin/StylusFileTypeFactory.java
package com.stylusplugin;
/**
* Created with IntelliJ IDEA.
* User: badabing
* Date: 2/12/13
* Time: 8:16 PM
* To change this template use File | Settings | File Templates.
*/
import com.intellij.openapi.fileTypes.FileTypeConsumer;
import com.intellij.openapi.fileTypes.FileTypeFactory;
import org.jetbrains.annotations.NotNull;
public class StylusFileTypeFactory extends FileTypeFactory{
@Override
public void createFileTypes(@NotNull FileTypeConsumer fileTypeConsumer) {
fileTypeConsumer.consume(StylusFileType.INSTANCE, "styl");
}
}
<file_sep>/src/com/stylusplugin/StylusIcons.java
package com.stylusplugin;
/**
* Created with IntelliJ IDEA.
* User: badabing
* Date: 2/12/13
* Time: 8:12 PM
* To change this template use File | Settings | File Templates.
*/
import com.intellij.openapi.util.IconLoader;
import javax.swing.*;
public class StylusIcons {
public static final Icon FILE = IconLoader.getIcon("/com/stylusplugin/icons/stylus.png");
}
<file_sep>/src/com/stylusplugin/psi/StylusFile.java
package com.stylusplugin.psi;
import com.intellij.extapi.psi.PsiFileBase;
import com.intellij.openapi.fileTypes.FileType;
import com.intellij.psi.FileViewProvider;
import com.stylusplugin.StylusFileType;
import com.stylusplugin.StylusLanguage;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
/**
* Created with IntelliJ IDEA.
* User: badabing
* Date: 2/14/13
* Time: 9:21 AM
* To change this template use File | Settings | File Templates.
*/
public class StylusFile extends PsiFileBase{
public StylusFile(@NotNull FileViewProvider viewProvider) {
super(viewProvider, StylusLanguage.INSTANCE);
}
@NotNull
@Override
public FileType getFileType() {
return StylusFileType.INSTANCE;
}
@Override
public String toString() {
return "Stylus File";
}
@Override
public Icon getIcon(int flags) {
return super.getIcon(flags);
}
}
<file_sep>/README.md
StylusPlugin
============<file_sep>/src/com/stylusplugin/Stylus.flex
package com.stylusplugin;
import com.intellij.lexer.FlexLexer;
import com.intellij.psi.tree.IElementType;
import com.stylusplugin.psi.StylusTypes;
import com.intellij.psi.TokenType;
%%
%class StylusLexer
%implements FlexLexer
%unicode
%function advance
%type IElementType
%eof{ return;
%eof}
CRLF= \n|\r|\r\n
WHITE_SPACE=[\ \t\f]
LINE_TERMINATOR = \r|\n|\r\n
INPUT_CHARACTER = [^\r\n]
NUMBER = [1-9]
COMMENT = {COMMENT_CSS} | {COMMENT_SILENT}
COMMENT_CSS = "/*" [^*] ~"*/"
COMMENT_SILENT = {WHITE_SPACE}* "//" .*
FIRST_VALUE_CHARACTER=[^ \n\r\f\\] | "\\"{CRLF} | "\\".
VALUE_CHARACTER=[^\n\r\f\\] | "\\"{CRLF} | "\\".
SEPARATOR=[::]
KEY_CHARACTER=[^:=\ \n\r\t\f\\] | "\\"{CRLF} | "\\".
%state WAITING_VALUE
%%
<YYINITIAL> {COMMENT} { yybegin(YYINITIAL); return StylusTypes.COMMENT; }
<YYINITIAL> {KEY_CHARACTER}+ { yybegin(YYINITIAL); return StylusTypes.KEY; }
<YYINITIAL> {SEPARATOR} { yybegin(WAITING_VALUE); return StylusTypes.SEPARATOR; }
<WAITING_VALUE> {CRLF} { yybegin(YYINITIAL); return StylusTypes.CRLF; }
<WAITING_VALUE> {WHITE_SPACE}+ { yybegin(WAITING_VALUE); return TokenType.WHITE_SPACE; }
<WAITING_VALUE> {FIRST_VALUE_CHARACTER}{VALUE_CHARACTER}* { yybegin(YYINITIAL); return StylusTypes.VALUE; }
{CRLF} { yybegin(YYINITIAL); return StylusTypes.CRLF; }
{WHITE_SPACE}+ { yybegin(YYINITIAL); return TokenType.WHITE_SPACE; }
. { return TokenType.BAD_CHARACTER; } | 6bd37e497b867866e68cde58d59abca7837c90ae | [
"Java",
"Markdown",
"JFlex"
] | 7 | Java | venqwish/StylusPlugin | 882b032551382f120961ab57fb9f3b97a5df5f9b | 221b40d2ba0da623c18b435fd8a6311790749bf0 |
refs/heads/master | <file_sep># Computer-Networks-Project
在线小票识别系统
本组网络实习的project是实现一个简单的在线小票识别系统,主要目的为输入小票图片,在网页端将关键信息进行提取。
代码总共分为四个部分,扫描收据文本本地化,扫描票据文本识别,关键信息提取,系统网页化
<file_sep>from flask import Flask, render_template, request, redirect, url_for, make_response,jsonify
from werkzeug.utils import secure_filename
import os
import cv2
import time
from datetime import timedelta
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'bmp'])
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
app = Flask(__name__)
app.send_file_max_age_default = timedelta(seconds=1)
# @app.route('/upload', methods=['POST', 'GET'])
@app.route('/', methods=['POST', 'GET'])
def upload():
if request.method == 'POST':
f = request.files['file']
if not (f and allowed_file(f.filename)):
return jsonify({"error": 1001, "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"})
user_input = request.form.get("name")
basepath = os.path.dirname(__file__)
print(basepath)
upload_path = os.path.join(basepath, 'static\\images', secure_filename(f.filename))
print(f.filename)
f.save(upload_path)
print(upload_path)
img = cv2.imread(upload_path)
savepath = os.path.join(basepath, 'static\\images', 'tmp.jpg')
cv2.imwrite(savepath, img)
exe1 = "python task1\\SSD_Method\\src\\detect.py --path " + upload_path + " --name " + f.filename
print(exe1)
os.system(exe1)
exe2 = "python task2\\main.py " + "--name " + f.filename
print(exe2)
os.system(exe2)
exe3 = "python task3\\src\\test.py " + "--name " + f.filename
print(exe3)
os.system(exe3)
jsonname = "results\\" + f.filename.split('.')[0]+ '.json'
fout = open(jsonname, 'r')
fout.readline()
v1 = fout.readline()
v2 = fout.readline()
v3 = fout.readline()
v4 = fout.readline()
return render_template('upload_ok.html',userinput=user_input,val1=time.time(), L1 = v1, L2 = v2, L3 = v3, L4 = v4)
return render_template('upload.html')
if __name__ == '__main__':
#app.debug = True
app.run(host='127.0.0.1', port=5000)
<file_sep>import os
import argparse
from torchvision import transforms
from utils import *
from PIL import Image, ImageDraw, ImageFont
device = torch.device("cpu")
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default="")
parser.add_argument('--name', type=str, default="")
opt = parser.parse_args()
savename = opt.name.split(".")[0]
# Transforms
resize = transforms.Resize((300, 300))
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# Load model checkpoint
checkpoint ='C:\\Users\\lenovo\\Desktop\\Class\\Network\\Project\\task1\\SSD_Method\\src\\BEST_checkpoint_ssd300.pth.tar'
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
best_loss = checkpoint['best_loss']
print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, best_loss))
model = checkpoint['model']
model = model.to(device)
model.eval()
def detect(original_image, min_score, max_overlap, top_k, max_OCR_overlap=1.0, max_OCR_ratio=1.0, suppress=None):
# Transform
image = normalize(to_tensor(resize(original_image)))
# Move to default device
image = image.to(device)
# Forward prop.
predicted_locs, predicted_scores = model(image.unsqueeze(0))
# Detect objects in SSD output
det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score,
max_overlap=max_overlap, top_k=top_k, original_image=original_image, max_OCR_overlap=max_OCR_overlap, max_OCR_ratio=max_OCR_ratio)
# Move detections to the CPU
det_boxes = det_boxes[0].to('cpu')
# Decode class integer labels
det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]
# If no objects found, the detected labels will be set to ['0.'], i.e. ['background'] in SSD300.detect_objects() in model.py
if det_labels == ['background']:
# Just return original image
return original_image
# Annotate
annotated_image = original_image
draw = ImageDraw.Draw(annotated_image)
font = ImageFont.truetype("C:\\Windows\\Fonts\\Sitka.ttc", 15)
# Suppress specific classes, if needed
for i in range(det_boxes.size(0)):
if suppress is not None:
if det_labels[i] in suppress:
continue
# Boxes
box_location = det_boxes[i].tolist()
with open('C:\\Users\\lenovo\\Desktop\\Class\\Network\\Project\\task1\\SSD_Method\\src\\result\\' + savename + '.txt', 'a+') as f:
f.write(str(int(box_location[0])) + ',' + str(int(box_location[1])) + ',')
f.write(str(int(box_location[0])) + ',' + str(int(box_location[3])) + ',')
f.write(str(int(box_location[2])) + ',' + str(int(box_location[1])) + ',')
f.write(str(int(box_location[2])) + ',' + str(int(box_location[3])) + '\n')
#f.write(str(box_location)+'\n')
draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])
draw.rectangle(xy=[l + 1. for l in box_location], outline=label_color_map[
det_labels[i]])
del draw
print(annotated_image)
return annotated_image
if __name__ == '__main__':
min_score = 0.1
max_overlap = 0.9
max_OCR_overlap = 0.2
max_OCR_ratio = 1
top_k = 300
img_path = opt.path
original_image = Image.open(img_path, mode='r')
original_image = original_image.convert('RGB')
out_image = detect(original_image, min_score=min_score, max_overlap=max_overlap, top_k=top_k, max_OCR_overlap=max_OCR_overlap, max_OCR_ratio=max_OCR_ratio) # .show()
img_save_path = 'C:\\Users\\lenovo\\Desktop\\Class\\Network\\Project\\task1\\SSD_Method\\src\\result\\' + savename + ".jpg"
out_image.save(img_save_path)
| 0a426885a70ad22c718b7b7dc8a465faf89cdc9a | [
"Markdown",
"Python"
] | 3 | Markdown | PkuDavidGuan/Computer-Networks-Project | eb2040c0949985d162aad6ba58b7fb06d27d8187 | a324b4789516a65cf936b5ae068cc40f7ad169c9 |
refs/heads/master | <repo_name>playerfm/dashboard<file_sep>/README.md
# Dashboard Quickstart
A [TopGap](https://github.com/playerfm/topgap) demo app.
```
$ npm -g install git://github.com/playerfm/topgap.git # install topgap globally
$ git clone https://github.com/playerfm/dashboard.git # clone this repo
$ topgap build dashboard /tmp/native/build
```
Now, go to `/tmp/native/build` directory and run the native desktop app.
![Demo](https://raw.githubusercontent.com/playerfm/dashboard/master/screenshots/2.png)
<file_sep>/src/app.js
window.addEventListener('DOMContentLoaded', function() {
var htmlTotal = document.getElementById('total'),
log = document.getElementById('logs'),
title = document.getElementById('title'),
x = 0,
y = 0,
total = 0,
logs;
/**
* Simple Calc
*
* Save logs history to db.
* Inserts the current total to the logs variable.
*/
function update() {
total = x + y;
htmlTotal.innerHTML = total;
logs.splice(0, 0, total);
updateLogs();
try {
chrome.storage.local.set({"logs": logs}, function(){
// done
});
} catch (err) {
console.log('chrome.* object is undefined. [error message]: ', err.message);
}
}
/**
* Simple Calc
*
* Update the DOM logs
*/
function updateLogs() {
try {
log.innerHTML = logs.join('<br>');
} catch (e) {
console.log('error logs variable. ', e);
}
}
/**
* This will get the title feed from the localStorage; and
* update the DOM.
*/
function setFeedTitle()
{
chrome.storage.local.get('title', function(data) {
title.innerHTML = data.title;
});
}
/**
* This will get the addition logs.
*/
chrome.storage.local.get("logs", function(items) {
logs = items.logs || [];
updateLogs();
setFeedTitle();
});
/**
* ==============================================================
* Events
* ==============================================================
*/
/**
* This listener will be executed every 10 secs
*/
chrome.alarms.onAlarm.addListener(function(alarm) {
if (alarm.name === 'feedUpdate') {
setFeedTitle(); // get title from localStorage and update DOM every 10 seconds
}
});
document.sum.n1.addEventListener('keyup', function(e) {
x = parseInt(this.value) || 0;
update();
});
document.sum.n2.addEventListener('keyup', function(e) {
y = parseInt(this.value) || 0;
update();
});
});
<file_sep>/src/background.js
var config = {
url: 'http://testdata.player.fm/?interval='
, interval: 10 // time interval to grab the RSS in seconds.
};
/**
* This will get the RSS; and
* pass the reponseText to onTextFeed() to parse and save it to localStorage.
*/
function fetchFeed(url, callback) {
var xhr = new window.XMLHttpRequest();
xhr.open('GET', url);
xhr.onload = function(response) {
callback(xhr.responseText);
};
xhr.send();
}
/**
* This will get the first title on rss feed; and
* save it to the localStorage.
*/
function onTextFeed(text) {
try {
var titles = text.match(/<title>(.*?)<\/title>/ig),
latestTitle = titles[1].match(/<title>(.*?)<\/title>/i)[1];
chrome.storage.local.set({'title': latestTitle}, function() {
// done saving
});
} catch (e) {
console.log('onTextFeed[error]: ', e.message);
}
}
/**
* ======================================================
* Event Listeners
* ======================================================
*/
/**
* This listener will be executed every 10 secs
*/
chrome.alarms.onAlarm.addListener(function(alarm) {
if (alarm.name === 'feedUpdate') {
fetchFeed(config.url + config.interval, onTextFeed); // get rss and save every 10 seconds.
}
});
try {
/**
* This is event if fired when app starts.
*/
chrome.app.runtime.onLaunched.addListener(function() {
// Create a new window for the app
chrome.app.window.create('src/index.html', {
width: 520,
height: 350
});
});
/**
* This event is fired when app is installed or updated
*/
chrome.runtime.onInstalled.addListener(function() {
});
/**
* This is event is fired when app is about to unload or close.
*/
chrome.runtime.onSuspend.addListener(function() {
// Do some simple clean-up tasks.
});
} catch (e) {
console.log('error: ', e.message);
}
try {
/**
* browserAction config
*/
chrome.browserAction.onClicked.addListener(function() {
// Create a new window for the app
chrome.windows.create({
url: 'src/index.html',
width: 520,
height: 350,
type: "popup"
}, function(s) {
console.log('window', s);
});
});
} catch (e) {
console.log('error: ', e.message);
}
/**
* ======================================================
* Events
* ======================================================
*/
/**
* Set alarm for every 10 seconds.
*/
chrome.alarms.create('feedUpdate', {
periodInMinutes: (0.0166667 * 10), // 10 seconds
});
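// Note: chrome.alarms measures periods in minutes; 0.0166667 is one second
// expressed in minutes (1/60), so (0.0166667 * 10) works out to roughly 10 seconds.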
| ec17576d61f207892be9d948b2d358f59500d096 | [
"Markdown",
"JavaScript"
] | 3 | Markdown | playerfm/dashboard | ddcade917c515d06ca400277132d27a14228439a | e2567b087914749cce647a430f1cedf741046516 |
refs/heads/master | <repo_name>Maja0108/firstJS<file_sep>/admin.js
let data = [{
"id": 1,
"name": "<NAME>",
"email": "<EMAIL>",
"address": "1111 Budapest"
},
{
"id": 2,
"name": "<NAME>",
"email": "<EMAIL>",
"address": "2000 Szentendre"
}
]
function modify(element) {
console.log(element)
let modifyButton = document.getElementById(element)
let children = modifyButton.children
console.log(children)
let name = children[0].textContent
let email = children[1].textContent
let address = children[2].textContent
console.log(name, email, address)
let modifyTr = document.createElement('td')
let nameInput = document.createElement("input")
let modifyTr2 = document.createElement('td')
let emailInput = document.createElement("input")
let modifyTr3 = document.createElement('td')
let addressInput = document.createElement("input")
let modifyTr4 = document.createElement('td')
let modifyBtnSave = document.createElement('button')
let modifyBtnDelete = document.createElement('button')
modifyButton.appendChild(modifyTr)
modifyTr.appendChild(nameInput)
modifyButton.appendChild(modifyTr2)
modifyTr2.appendChild(emailInput)
modifyButton.appendChild(modifyTr3)
modifyTr3.appendChild(addressInput)
modifyButton.appendChild(modifyTr4)
modifyTr4.appendChild(modifyBtnSave)
modifyTr4.appendChild(modifyBtnDelete)
nameInput.defaultValue = name
emailInput.defaultValue = email
addressInput.defaultValue = address
children[0].style.display = 'none'
children[1].style.display = 'none'
children[2].style.display = 'none'
children[3].style.display = 'none'
modifyBtnSave.className = 'btn btn-group btn-success'
modifyBtnSave.innerHTML = 'Save'
modifyBtnDelete.className = 'btn btn-group btn-danger'
modifyBtnDelete.innerHTML = 'Delete'
modifyBtnSave.id = element
modifyBtnDelete.id = element
modifyBtnSave.addEventListener("click", save)
modifyBtnDelete.addEventListener("click", deleteRow2)
}
function deleteRow(element) {
let deleteButton = document.getElementById(element)
let parent = deleteButton.parentElement
    alert('Do you want to delete?')
parent.removeChild(deleteButton)
}
function save(){
console.log(event.target.id)
let buttonId = event.target.id
let saveButton = document.getElementById(buttonId)
let children = saveButton.children
console.log(children)
let newName = children[4].firstChild.value
let newEmail = children[5].firstChild.value
let newAddress = children[6].firstChild.value
console.log(newName, newEmail, newAddress)
}
function deleteRow2() {
let buttonId = event.target.id
let deleteButton = document.getElementById(buttonId)
let parent = deleteButton.parentElement
    alert('Do you want to delete?')
parent.removeChild(deleteButton)
}<file_sep>/calculate.js
function calculatePrice(){
alert("boci")
let pricePerPiece = 1200
let amountInput = document.querySelector("form#order input[name=amount]")
let priceField = document.querySelector("form#order .message strong")
//let saucePrice = document.querySelector("form#input fieldset[name=sauce]")
//saucePrice = parseInt(saucePrice.value)
let amount = parseInt(amountInput.value)
let price = amount * pricePerPiece
priceField.innerHTML = `${price}`
}
function calculatePrice2(){
let pricePerPiece = 1200
let amountInput = document.querySelector("form#order input[name=amount]")
let priceField = document.querySelector("form#order .message strong")
let saucePrice = document.querySelector("form#order select[name=sauce]")
let extraPrice = document.querySelector('form#order input[name="extra"]:checked')
saucePrice = parseInt(saucePrice.value)
extraPrice = parseInt(extraPrice.value)
let amount = parseInt(amountInput.value)
let price = amount * (pricePerPiece+saucePrice+extraPrice)
priceField.innerHTML = `${price}`
}
function validate(){
    let name = document.querySelector("form#order input[name=name]").value
    // alert only when the name is NOT valid; the original test was inverted
    // and ran on the element itself instead of its value
    if (!/^[a-zA-Z]+$/.test(name)){
        alert("Please, write a valid name")
    }
    let email = document.querySelector("form#order input[name=email]").value
    if (!/^\S+@\S+$/.test(email)){
        alert("Please, check your email address.")
    }
}
function calculatePrice_cond(){
let pricePerPiece = 1200
let amountInput = document.querySelector("form#order input[name=amount]")
let priceField = document.querySelector("form#order .message strong")
let saucePrice = document.querySelector("form#order select[name=sauce]")
let extraPrice = document.querySelector('form#order input[name="extra"]:checked')
let amount = parseInt(amountInput.value)
if (amount > 10){
alert("Maximum order number is 10")
} else if (amount < 1){
alert("Minimum order number is 1")
} else {
saucePrice = parseInt(saucePrice.value)
extraPrice = parseInt(extraPrice.value)
let price = amount * (pricePerPiece+saucePrice+extraPrice)
priceField.innerHTML = `${price}`
}
}
<file_sep>/weather_widget.js
let temperatures = [11.2, 34, 25, -2, 0, 8, 16]
function weatherWidget() {
let day = document.querySelector("form#widget select[name=day]")
let tempField = document.querySelector("form#widget .temp strong")
let day_value = day.value
//alert("Name of the day: "+day_value)
let day_number = parseFloat(day_value)
temp = temperatures[day_number]
//alert("Temperature of the day:" + temp)
tempField.innerHTML = `${temp}`
}
function dailyProduct() {
let day = document.querySelector("form#widget select[name=day]")
let day_number = parseFloat(day.value)
let text
temp = temperatures[day_number]
textField = document.querySelector("form#widget.widget div.day div.row.weather div.col-md-8 div.daily")
if (temp < 0) {
text = "Our daily product is Hot Chocolate"
} else if (temp >= 0 && temp < 15) {
text = "Our daily product is Hot Tea"
} else if (temp >= 15 && temp < 20) {
text = "Our daily product is Home-made Cookies"
} else if (temp >= 20 && temp < 25) {
text = "Our daily product is Ice Cream"
} else {
text = "Our daily product is Ice cold Limonade"
}
textField.innerHTML = `${text}`
}
function maxTemp() {
let maxField = document.querySelector("div.col.minimum strong")
let maxT = temperatures[0]
for (let i = 0; i < temperatures.length; i++){
if (maxT < temperatures[i]){
maxT = temperatures[i]
}
}
maxField.innerHTML = `${maxT}`
}
function minTemp(){
let minField = document.querySelector("div.col.maximum strong")
let minT = temperatures[0]
for (let i = 0; i < temperatures.length; i++){
if (minT > temperatures[i]){
minT = temperatures[i]
}
}
minField.innerHTML = `${minT}`
}
function averageTemp(){
let avgField = document.querySelector(".average strong")
let avgTemp
let sumTemp = 0
for (let i = 0; i < temperatures.length; i++){
sumTemp += temperatures[i]
}
avgTemp = sumTemp/temperatures.length
avgField.innerHTML = `${avgTemp.toFixed(2)}`
}<file_sep>/README.md
# firstJS
My first frontend trials
<file_sep>/db.js
let fetchInit = {
method:"GET",
headers: new Headers(),
mode: "cors",
cache: "default"
};
const fetchData = fetch("http://localhost:3000/users", fetchInit);
fetchData.then(data => data.json()).then(data => console.log(data))
let putInit =
{
    method:"PUT", // was "GET", which contradicted the variable's name
    headers: new Headers(),
    mode: "cors",
    cache: "default"
};
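// putInit looks intended for json-server style updates, e.g. (hypothetical):
//   fetch("http://localhost:3000/users/1", putInit);
// note that a real PUT would also need a body and a "Content-Type" header,
// which are not set up here yet.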
"Markdown",
"JavaScript"
] | 5 | Markdown | Maja0108/firstJS | 3790b6671367cd406bb22a0c026e4789f442c9c2 | 8439b5f735e579c3954a31d7d97a403fd68b5092 |
refs/heads/main | <repo_name>PalomaTamayo/RMI<file_sep>/ArchivosRMI/Server/src/server/RMIInterface.java
package server;
public interface RMIInterface {
}
<file_sep>/ArchivosRMI/Interface/src/rmiinterface/RMIInterface.java
package rmiinterface;
import java.rmi.Remote;
import java.rmi.RemoteException;
import java.io.File;
import java.io.FileFilter;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
public interface RMIInterface extends Remote {
public String helloTo(String name) throws RemoteException;
public ArrayList<String> obtenerArchivos(String ruta) throws RemoteException;
}
| 73c050521090389a0ea8f1341ee3df6bfd9050b1 | [
"Java"
] | 2 | Java | PalomaTamayo/RMI | a615159b61cf994d84426c88c7f77a28cb6dd838 | b7152c585cddc3980ef23a63e0895e9085ec17eb |
refs/heads/master | <repo_name>EttyDaniel/json_the_cat<file_sep>/breedFetcher.js
const request = require('request');
const args = process.argv.slice(2);
// Make sure that the user provided at least one argument
if (args[0] === undefined) {
console.log("Error: Incorrect usage, a single argument ('breed') is required");
process.exit();
}
request(`https://api.thecatapi.com/v1/breeds/search?name=${args[0]}`, (error, response, body) => {
// An error has occured , print it and exit
if (error !== null) {
console.log("The program has encountred an error:");
console.log(error);
process.exit();
}
const data = JSON.parse(body);
// Making sure the reply contains something
if (data === null || data === undefined) {
console.log("Error: received a null response");
} else if (data.length === 0) {
// The response is empty, breed was not found
console.log("Error:Breed (" + args[0] + ") provided as argument was not found!");
} else {
// We've got something, print the description
console.log("Breed (" + args[0] + "):" + data[0].description);
}
}); | 2dc241904349e4f4c9e926593464dad7f276e3c5 | [
"JavaScript"
] | 1 | JavaScript | EttyDaniel/json_the_cat | 4abad2f4ef4679e931341b4c39eaf54e1c3d062b | 508cb923303eb090535d2b1fe48ad1dd0be5a3a4 |
refs/heads/master | <repo_name>chadthan/CrashPad<file_sep>/controllers/PadController.js
var Pad = require('../models/Pad');
module.exports = {
get: function(params, isRaw, completion){
Pad.find(params, function(err, pads){
if (err){
completion(err, null);
return;
}
if (isRaw == true){
completion(null, pads);
return;
}
var list = [];
for (var i=0; i<pads.length; i++){
var pad = pads[i];
list.push(pad.summary());
}
completion(null, list);
});
},
getById: function(id, completion){
Pad.findById(id, function(err, pad){
if (err){
completion({
message:'Pad '+id+' not found'
}, null);
return;
}
if (pad == null){
completion({
message:'Pad '+id+' not found'
}, null);
return;
}
completion(null, pad.summary());
});
},
post: function(params, completion){
Pad.create(params, function(err, pad){
if (err){
completion(err, null);
return;
}
completion(null, pad.summary());
return;
});
},
put: function(id, params, completion){
Pad.findByIdAndUpdate(id, params, {new:true}, function(err, pad){
if (err){
completion(err, null);
return;
}
completion(null, pad.summary());
});
}
}<file_sep>/views/partials/footer.mustache
<div class="container footer-middle">
<div class="wide-separator-line bg-mid-gray no-margin-lr margin-three no-margin-bottom"></div>
<div class="row margin-four no-margin-bottom">
<div class="col-md-6 col-sm-12 sm-text-center sm-margin-bottom-four">
<!-- link -->
<ul class="list-inline footer-link text-uppercase">
<li><a href="about-us.html">About</a></li>
<li><a href="blog-masonry-3columns.html">Blog</a></li>
<li><a href="careers.html">Careers</a></li>
<li><a href="testimonials.html">Testimonials</a></li>
<li><a href="contact-us.html">Contact</a></li>
</ul>
<!-- end link -->
</div>
<div class="col-md-6 col-sm-12 footer-social text-right sm-text-center">
<!-- social media link -->
<a target="_blank" href="https://www.facebook.com/"><i class="fa fa-facebook"></i></a>
<a target="_blank" href="https://twitter.com/"><i class="fa fa-twitter"></i></a>
<a target="_blank" href="https://www.linkedin.com/"><i class="fa fa-linkedin"></i></a>
<!-- end social media link -->
</div>
</div>
</div>
<div class="container-fluid bg-dark-gray footer-bottom">
<div class="container">
<div class="row margin-three">
<!-- copyright -->
<div class="col-md-6 col-sm-6 col-xs-12 copyright text-left letter-spacing-1 xs-text-center xs-margin-bottom-one">
© 2016 Powered by CrashPad LLC
</div>
<!-- end copyright -->
<!-- logo -->
<div class="col-md-6 col-sm-6 col-xs-12 footer-logo text-right xs-text-center">
<a href="index.html">
<img src="images/logo-light-gray.png" alt="" />
</a>
</div>
<!-- end logo -->
</div>
</div>
</div>
<!-- scroll to top -->
<!-- scroll to top End... --><file_sep>/routes/account.js
var express = require('express');
var router = express.Router();
var ProfileController = require('../controllers/ProfileController');
var bcrypt = require('bcrypt');
function createErrorObject(msg){
var error = {
confirmation: 'fail',
message: msg
}
return error;
}
router.get('/:action', function(req, res, next) {
var action = req.params.action;
if (action == 'logout'){
req.session.reset();
res.json({
confirmation:'success',
message:'BYE!'
});
return;
}
if (action == 'currentuser'){
if (req.session == null){
res.json(createErrorObject('User not logged in.'));
return;
}
if (req.session.user == null){
res.json(createErrorObject('User not logged in.'));
return;
}
ProfileController.getById(req.session.user, function(err, result){
if (err){
res.json(createErrorObject(err.message));
return;
}
res.json({
confirmation:'success',
currentuser: result
});
return;
});
return;
}
res.json({
confirmation:'fail',
message: 'Invalid Action: '+action
});
});
router.post('/:action', function(req, res, next) {
var action = req.params.action;
if (action == 'login'){
var loginCredentials = req.body;
var email = loginCredentials.email.toLowerCase();
// find the profile with that email:
ProfileController.get({email:email}, true, function(err, results){
if (err){
res.json(createErrorObject(err.message));
return;
}
if (results.length == 0){
res.json(createErrorObject('User not found.'));
return;
}
var user = results[0]; // take most recent user
var passwordCorrect = bcrypt.compareSync(loginCredentials.password, user.password);
if (passwordCorrect == false){
res.json(createErrorObject('Incorrect password'));
return;
}
req.session.user = user._id; // user logged in - install session to track:
res.json({
confirmation:'success',
profile: user.summary()
});
return;
});
return;
}
res.json({
confirmation:'fail',
message: 'Invalid Action: '+action
});
});
module.exports = router;
<file_sep>/views/index.mustache
<!doctype html>
<html class="no-js" lang="en">
<head>
{{>head}}
</head>
<body>
<!-- navigation panel -->
<nav class="navbar navbar-default navbar-fixed-top nav-transparent overlay-nav sticky-nav nav-white nav-dark-transparent" role="navigation">
<div class="container">
<div class="row">
<!-- logo -->
<div class="col-md-2 pull-left">
<a class="logo-light" href="/">
<img alt="" src="images/logo-white.png" class="logo" />
</a>
<a class="logo-dark" href="/">
<img alt="" src="images/logo-light.png" class="logo" />
</a>
</div>
<!-- end logo -->
<!-- toggle navigation -->
<div class="navbar-header col-sm-8 col-xs-2 pull-right">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button>
</div>
<!-- toggle navigation end -->
<!-- main menu -->
<div class="col-md-10 no-padding-right accordion-menu text-right">
<div class="navbar-collapse collapse">
<ul id="accordion" class="nav navbar-nav navbar-right panel-group">
<!-- menu item -->
<li class="dropdown panel">
<a href="/">Home <i class="fa fa-angle-down"></i></a>
</li>
<li class="dropdown panel simple-dropdown">
<a href="#collapse7" class="dropdown-toggle collapsed" data-toggle="collapse" data-parent="#accordion" data-hover="dropdown">Pads <i class="fa fa-angle-down"></i></a>
<!-- sub menu single -->
<!-- sub menu item -->
<ul id="collapse7" class="dropdown-menu panel-collapse collapse" role="menu">
<li><a href="/pads">New York</a></li>
<li><a href="/pads">Chicago</a></li>
<li><a href="/pads">Los Angeles</a></li>
</ul>
<!-- end sub menu item -->
<!-- end sub menu single -->
</li>
<li class="dropdown panel">
<a href="/register">Join <i class="fa fa-angle-down"></i></a>
</li>
</ul>
</div>
</div>
<!-- end main menu -->
</div>
</div>
</nav>
<!-- end navigation panel -->
<!-- parallax -->
<section class="parallax1 parallax-fix full-screen" style="max-height:660px" >
<img class="parallax-background-img" src="/images/nycrooftop.jpg" alt="" />
<div class="container full-screen position-relative">
<div class="row" >
<div class="col-md-5">
<form class="signup" action="/api/profile" method="post">
<p>
Sign up for a sneak preview when <br />we launch this Spring!
</p>
<hr />
<input type="text" name="name" placeholder="First Name" /><br />
<input type="text" name="city" placeholder="City" /><br />
<input type="text" name="email" placeholder="Email" /><br />
<input type="text" name="password" placeholder="<PASSWORD>" /><br />
<button class="btn btn-success no-margin-bottom" type="submit">Register</button>
</form>
</div>
<div class="col-md-7">
<div class="slider-typography">
<div class="slider-text-middle-main">
<div class="slider-text-bottom slider-text-middle5 text-left animated fadeInUp">
<span class="slider-title-big5 white-text">Need a place to crash tonight?</span>
<span class="slider-subtitle5 white-text">Find nearby places <br> to crash!<br></span><br>
<div class="separator-line bg-yellow no-margin-lr no-margin-top xs-margin-bottom-ten"></div>
<span class="slider-title-big5 white-text">Have a spare room?</span>
<span class="slider-subtitle5 white-text">Earn some extra <br> Cash Tonight!<br></span><br>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- end parallax -->
<!-- about section -->
<section class="no-padding-bottom wow fadeIn">
<div class="container">
<div class="row">
<div class="col-md-6 col-sm-10 text-center center-col">
<span class="margin-five no-margin-top display-block letter-spacing-2">EST. 2015</span>
                    <h1>An overnight-stay marketplace with local hosts.</h1>
                    <p class="text-med width-90 center-col margin-seven no-margin-bottom">We are a marketplace for last-minute stays, where you can spend the night with local hosts.</p>
</div>
</div>
</div>
</section>
<!-- end about section -->
<!-- key person section -->
<section id="Current-Pads" class="wow fadeIn">
<div class="container">
<div class="row">
<!-- section title -->
<div class="col-md-12 text-center">
<h3 class="section-title no-padding-bottom">Current Pads</h3>
</div>
<!-- end section title -->
<div class="col-md-5 col-sm-10 center-col text-center margin-six wow fadeIn">
<h4 class="gray-text">Available Pads near you. </h4>
</div>
</div>
<div class="row margin-six">
<!-- key person item -->
<div class="col-md-4 col-xs-4 bottom-margin text-center">
<div class="key-person">
<div class="key-person-img"><img src="images/livingroom.jpg" alt=""></div>
<div class="key-person-details">
<span class="person-name black-text">Midtown East</span> <span class="person-post">$200</span>
<div class="separator-line bg-yellow"></div>
                            <p>Living room with a private bathroom!</p>
</div>
</div>
</div>
<div class="col-md-4 col-xs-4 bottom-margin text-center">
<div class="key-person">
<div class="key-person-img"><img src="images/bed.jpg" alt=""></div>
<div class="key-person-details">
<span class="person-name black-text">Fashion District</span> <span class="person-post">$225</span>
<div class="separator-line bg-yellow"></div>
<p>Easy access to uptown or downtown</p>
</div>
</div>
</div>
<div class="col-md-4 col-xs-4 text-center">
<div class="key-person">
<div class="key-person-img"><img src="images/patio.jpg" alt=""></div>
<div class="key-person-details">
<span class="person-name black-text">Chelsea</span> <span class="person-post">$250</span>
<div class="separator-line bg-yellow"></div>
<p>Large living room space with patio.</p>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- end key person section -->
<section>
<div class="container">
<div class="row">
<div class="col-md-12 col-sm-12 center-col text-center">
<a href="/register" class="btn-success btn btn-large button button-desc button-3d btn-round">Join Us<span>Be Part of CrashPad Community</span></a>
</div>
</div>
</div>
</section>
<!-- style="padding:40px 0px 40px 0px;text-align:center"-->
<!-- footer -->
{{>footer}}
{{>scripts}}
</body>
</html>
<file_sep>/views/account.mustache
<!doctype html>
<html class="no-js" lang="en">
<head>
{{>head}}
</head>
<body>
<!-- navigation panel -->
<nav class="navbar navbar-default navbar-fixed-top nav-transparent overlay-nav sticky-nav nav-white nav-dark-transparent" role="navigation">
<div class="container">
<div class="row">
<!-- logo -->
<div class="col-md-2 pull-left">
<a class="logo-light" href="/">
<img alt="" src="images/logo-white.png" class="logo" />
</a>
<a class="logo-dark" href="/">
<img alt="" src="images/logo-light.png" class="logo" />
</a>
</div>
<!-- end logo -->
<!-- toggle navigation -->
<div class="navbar-header col-sm-8 col-xs-2 pull-right">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button>
</div>
<!-- toggle navigation end -->
<!-- main menu -->
<div class="col-md-10 no-padding-right accordion-menu text-right">
<div class="navbar-collapse collapse">
<ul id="accordion" class="nav navbar-nav navbar-right panel-group">
<!-- menu item -->
<li class="dropdown panel">
<a href="/">Home <i class="fa fa-angle-down"></i></a>
</li>
<li class="dropdown panel simple-dropdown">
<a href="#collapse7" class="dropdown-toggle collapsed" data-toggle="collapse" data-parent="#accordion" data-hover="dropdown">Pads <i class="fa fa-angle-down"></i></a>
<!-- sub menu single -->
<!-- sub menu item -->
<ul id="collapse7" class="dropdown-menu panel-collapse collapse" role="menu">
<li><a href="/pads">New York</a></li>
<li><a href="/pads">Chicago</a></li>
<li><a href="/pads">Los Angeles</a></li>
</ul>
<!-- end sub menu item -->
<!-- end sub menu single -->
</li>
<li class="dropdown panel">
<a href="/register">Join <i class="fa fa-angle-down"></i></a>
</li>
</ul>
</div>
</div>
<!-- end main menu -->
</div>
</div>
</nav>
<!--end navigation panel -->
<!-- head section -->
<section class="wow fadeIn border-top">
<div class="container">
<div class="row">
<div class="col-md-7 col-sm-10 center-col text-center margin-ten no-margin-top xs-margin-bottom-seven">
<h6 class="no-margin-top margin-ten xs-margin-bottom-seven"><strong class="black-text">User Account Info</strong></h6>
</div>
</div>
<div class="row">
<!-- tab -->
<div class="col-md-12 col-sm-12 center-col text-center" id="animated-tab">
<!-- tab navigation -->
<ul class="nav nav-tabs margin-five no-margin-top">
<li class="nav active"><a href="#tab6_sec1" data-toggle="tab"><span><i class="icon-tools"></i></span></a><h5>Profile</h5></li>
<li class="nav"><a href="#tab6_sec2" data-toggle="tab"><span><i class="icon-camera"></i></span></a><h5>Listings</h5></li>
<li class="nav"><a href="#tab6_sec3" data-toggle="tab"><span><i class="icon-target"></i></span></a><h5>Billing Info</h5></li>
<li class="nav"><a href="#tab6_sec4" data-toggle="tab"><span><i class="icon-laptop"></i></span></a><h5>Setting</h5></li>
</ul>
<!-- end tab navigation -->
<!-- tab content section -->
<div class="tab-content">
<!-- tab content -->
<div id="tab6_sec1" class="col-md-9 col-sm-12 text-center center-col tab-pane fade in active">
<div class="tab-pane fade in">
<div class="row">
<div class="col-md-6 col-sm-12 text-left gray-text">
<h5>Profile</h5>
<div class="separator-line bg-yellow no-margin-lr sm-margin-five"></div>
<p class="text-large margin-five margin-right-ten">Introduce yourself to CrashPad Community</p>
</div>
<div class="col-md-6 col-sm-12 text-left gray-text">
<form>
<div class="form-group">
<label for="exampleInputFirstName1">First Name</label>
<input type="email" class="form-control input-round big-input text-lowercase" id="exampleInputFirstName1" placeholder="First Name">
</div>
<div class="form-group">
<label for="exampleInputCity1">City</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputCity1" placeholder="City">
</div>
<div class="form-group">
<label for="exampleInputCity1">Email</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputEmail1" placeholder="Email">
</div>
<div class="form-group">
<label for="exampleInputFile">Password</label>
<input type="file" id="exampleInputPassword" class="input-round big-input text-lowercase" placeholder="<PASSWORD>">
</div>
<div class="checkbox">
<label><input type="checkbox">Terms & Condition </label>
</div>
<button class="btn btn-black btn-small btn-round no-margin-bottom" type="submit">Submit</button>
</form>
</div>
</div>
</div>
</div>
<!-- end tab content -->
<!-- tab content -->
<div id="tab6_sec2" class="col-md-9 col-sm-12 text-center center-col tab-pane fade in">
<div class="tab-pane fade in">
<div class="row">
<div class="col-md-6 col-sm-12 text-left gray-text">
<h5>Listings</h5>
<div class="separator-line bg-yellow no-margin-lr sm-margin-five"></div>
<p class="text-large margin-five margin-right-ten">Showing your available Pads for community</p>
</div>
<div class="col-md-6 col-sm-12 text-left text-med gray-text">
<form>
<div class="form-group">
<label for="exampleInputLocation1">Address</label>
<input type="email" class="form-control input-round big-input text-lowercase" id="exampleInputAddress1" placeholder="Enter Location">
</div>
<div class="form-group">
<label for="exampleInputCity1">City</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputCity1" placeholder="City">
</div>
<div class="form-group">
<label for="exampleInputCity1">Rate</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputRate1" placeholder="Rate">
</div>
<div class="form-group">
<label for="exampleInputFile">Image</label>
<input type="file" id="exampleInputFile" class="input-round big-input text-lowercase">
</div>
<div class="checkbox">
<label><input type="checkbox">Terms & Condition </label>
</div>
<button class="btn btn-black btn-small btn-round no-margin-bottom" type="submit">Submit</button>
</form>
</div>
</div>
</div>
</div>
<!-- end tab content -->
<!-- tab content -->
<div id="tab6_sec3" class="col-md-9 col-sm-12 text-center center-col tab-pane fade in">
<div class="tab-pane fade in">
<div class="row">
<div class="col-md-6 col-sm-12 text-left gray-text">
<h5>Billing Info</h5>
<div class="separator-line bg-yellow no-margin-lr sm-margin-five"></div>
<p class="text-large margin-five margin-right-ten">How do you want to get pay and take care your bills</p>
</div>
<div class="col-md-6 col-sm-12 text-left text-med gray-text">
<form>
<div class="form-group">
<label for="exampleInputLocation1">Address</label>
<input type="email" class="form-control input-round big-input text-lowercase" id="exampleInputAddress1" placeholder="Enter Location">
</div>
<div class="form-group">
<label for="exampleInputCity1">City</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputCity1" placeholder="City">
</div>
<div class="form-group">
<label for="exampleInputCity1">Rate</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputRate1" placeholder="Rate">
</div>
<div class="form-group">
<label for="exampleInputFile">Image</label>
<input type="file" id="exampleInputFile" class="input-round big-input text-lowercase">
</div>
<div class="checkbox">
<label><input type="checkbox">Terms & Condition </label>
</div>
<button class="btn btn-black btn-small btn-round no-margin-bottom" type="submit">Submit</button>
</form>
</div>
</div>
</div>
</div>
<!-- end tab content -->
<!-- tab content -->
<div id="tab6_sec4" class="col-md-9 col-sm-12 text-center center-col tab-pane fade in">
<div class="tab-pane fade in">
<div class="row">
<div class="col-md-6 col-sm-12 text-left gray-text">
<h5>Setting</h5>
<div class="separator-line bg-yellow no-margin-lr sm-margin-five"></div>
<p class="text-large margin-five margin-right-ten">A first impression can make or break you. we can help you find the precise message to clearly.</p>
</div>
<div class="col-md-6 col-sm-12 text-left text-med gray-text">
<form>
<div class="form-group">
<label for="exampleInputLocation1">Location</label>
<input type="email" class="form-control input-round big-input text-lowercase" id="exampleInputLocation1" placeholder="Enter Location">
</div>
<div class="form-group">
<label for="exampleInputCity1">City</label>
<input type="password" class="form-control input-round big-input text-lowercase" id="exampleInputCity1" placeholder="City">
</div>
<div class="form-group">
<label for="exampleInputFile">File input</label>
<input type="file" id="exampleInputFile" class="input-round big-input text-lowercase">
</div>
<div class="checkbox">
<label><input type="checkbox">Terms & Condition </label>
</div>
<button class="btn btn-black btn-small btn-round no-margin-bottom" type="submit">Submit</button>
</form>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- end tab -->
</div>
</div>
</section>
<!-- end content section -->
{{>footer}}
{{>scripts}}
</body>
</html>
| 56469acb82aa712ffec0d403921c4f3e7c11284a | [
"Mustache",
"JavaScript"
] | 5 | Mustache | chadthan/CrashPad | d430e64e0ab5c32988e648e9d4e25dd77ddc0760 | fe8235ec2cc98e2396240c928dd3defd0ca178d9 |
refs/heads/master | <repo_name>Prince10934/color-flipper<file_sep>/hex.js
const colors = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, "A", "B", "C", "D", "E", "F"];
const color = document.getElementById("color");
const main = document.getElementById("main");
const btn = document.getElementById("btn");
function getRandomNumber() {
return Math.floor(Math.random() * colors.length);
}
btn.addEventListener("click", () => {
let str = "#";
for (let i = 0; i < 6; i++) {
str += colors[getRandomNumber()];
}
color.innerText = str;
main.style.backgroundColor = str;
});
<file_sep>/app.js
const colors = [
"red",
"blue",
"rgba(120,120,120,0.5)",
"#9e9e9e",
"hsl(180,100%,75%)",
"rgb(180,180,0)",
];
const color = document.getElementById("color");
const main = document.getElementById("main");
const btn = document.getElementById("btn");
function getRandomNumber() {
return Math.floor(Math.random() * colors.length);
}
btn.addEventListener("click", () => {
const rColor = colors[getRandomNumber()];
main.style.backgroundColor = rColor;
color.innerText = rColor;
});
<file_sep>/_navbar.scss
.navbar {
display: flex;
padding: 1rem;
justify-content: space-evenly;
align-items: center;
font-size: 1.45rem;
height: 65px;
box-shadow: 0 3px 3px 0 $light-grey;
@media (max-width: 400px) {
flex-direction: column;
min-height: 100px;
}
&__logo {
color: $light-blue;
font-weight: bold;
}
&__links {
@media (max-width: 400px) {
padding-top: 1rem;
}
ul {
display: flex;
list-style: none;
li {
a {
padding: 0 1rem;
text-decoration: none;
color: $dark-grey;
display: block;
&:hover {
color: $light-blue;
transform: scale(1.1);
font-weight: bold;
}
}
}
}
}
}
| cc6863f060e095e716e3c8466e99aaab9b6637c1 | [
"SCSS",
"JavaScript"
] | 3 | SCSS | Prince10934/color-flipper | 03c6686b437e89947451bda781ce7aad6d410f82 | 670143e2160573488929d5a23409335bb73eaf96 |
refs/heads/master | <repo_name>azuredan/blog<file_sep>/source/_posts/about.md
title: about
date: 2015-06-04 17:08:37
tags:
---
## About us
私たちはAzureをお使いになられている方々への情報提供のため、
当ブログにて記事を公開することと致しました。
また、私たちは仲間を募集しております。
是非、記事を投稿してみたいという方はお知らせください。記事を投稿していただけた場合、
投稿された記事に製品やサービスの宣伝を含めて頂くことができます。
自社の宣伝や得意分野のアピールをかねて、情報共有へご協力頂けますよう願い致します。
| 02f9519189840d49befc4995ba5f65038c1ada20 | [
"Markdown"
] | 1 | Markdown | azuredan/blog | 90a07d4a26f4fb746dd3f617795a5139b609543e | edb8e3e2551d070cee599ea9ee78dd967c1caf50 |
refs/heads/master | <file_sep>/* BulletConfig filed desic: (5 fileds)
id: string
bulletName: string
killValue: int
speed: int
coinCost: int
*/
var filed_data = {
key_1: ["1", "bullet", 100, 100, 100, ],
key_2: ["2", "bullet", 120, 100, 100, ],
key_3: ["3", "bullet", 140, 100, 100, ],
key_4: ["4", "bullet", 160, 100, 100, ],
key_5: ["5", "bullet", 170, 100, 100, ],
key_6: ["6", "bullet", 180, 100, 100, ],
key_7: ["7", "bullet", 190, 100, 100, ],
key_8: ["8", "bullet", 200, 100, 100, ],
key_9: ["9", "bullet", 210, 100, 100, ],
key_10: ["10", "bullet", 220, 100, 100, ],
key_11: ["11", "bullet", 230, 100, 100, ],
key_12: ["12", "bullet", 240, 100, 100, ],
key_13: ["13", "bullet", 250, 100, 100, ],
key_14: ["14", "bullet", 260, 100, 100, ],
key_15: ["15", "bullet", 270, 100, 100, ],
key_16: ["16", "bullet", 280, 100, 100, ],
key_17: ["17", "bullet", 290, 100, 100, ],
key_18: ["18", "bullet", 300, 100, 100, ],
key_19: ["19", "bullet", 310, 100, 100, ],
key_20: ["20", "bullet", 320, 100, 100, ],
total_count: 20
};
function get_record(id) {
var key = "key_" + id;
var record_array = filed_data[key];
if(!record_array) {
return null;
}
var record = {
id: record_array[0],
bulletName: record_array[1],
killValue: record_array[2],
speed: record_array[3],
coinCost: record_array[4],
};
return record;
}
var BulletConfig = {
filed_data_array: filed_data,
get_record: get_record,
};
module.exports = BulletConfig;<file_sep>import UI_ctrl from "../../managers/UI_ctrl";
import game_app from "../game_app";
export default class LoginUI_ctrl extends UI_ctrl {
constructor() {
super();
        // More component parameter docs: https://ldc2.layabox.com/doc/?nav=zh-as-2-4-0
}
onAwake() {
super.onAwake();
}
onStart() {
// this.view["center/desic"].text = "version";
this.view["center/start_button"].on(Laya.Event.CLICK, this, this.on_start_click);
}
on_start_click() {
game_app.Instance.enter_choose_scene();
}
onEnable() {
}
onDisable() {
}
}<file_sep>import res_mgr from "../managers/res_mgr";
import fish_nav from "./fish_nav";
import fish_AI from "./fish_AI";
import fish_data from "./fish_data";
var ObjectType = require("./ObjectType");
export default class farm_mgr extends Laya.Script {
constructor() {
super();
        // More component parameter docs: https://ldc2.layabox.com/doc/?nav=zh-as-2-4-0
}
onAwake() {
this.fish_root = this.owner.getChildByName("fish_root");
this.farm_bg = this.owner.getChildByName("bg"); //
this.farm_bg.layer = ObjectType.BG;
this.farm_bg.meshRenderer.sharedMaterial.renderQueue = 999;
this.fish_render_queue = 1000;
}
onStart() {
}
remove_all_fishes() {
this.fish_root.destroyChildren();
}
create_fish(r) {
var fish_prefab = res_mgr.Instance.get_sprite3D_res("res/prefabs3D/LayaScene_fishes/Conventional/" + r.fishName + ".lh");
if (!fish_prefab) {
console.log("error: fish has no prefab: " + r.fishName);
return null;
}
var fish = Laya.Sprite3D.instantiate(fish_prefab);
var scale = r.scale / 1000;
        scale = scale * 0.3; // the original Unity version used 15, 5 ---> scaled down to 0.3 here
fish.transform.localScale = new Laya.Vector3(scale, scale, scale);
fish.layer = ObjectType.FISH;
var anim = fish.getChildByName("anim");
var animator = fish.getChildByName("anim").getComponent(Laya.Animator);
var anim_state = animator.getDefaultState();
if (!anim_state) {
console.log("error fish: " + r.fishName + " has no anim");
}
else {
anim_state.clip.islooping = true;
}
var body = anim.getChildByName("body");
body.skinnedMeshRenderer.sharedMaterial.renderQueue = this.fish_render_queue;
this.fish_root.addChild(fish);
this.fish_render_queue ++;
return fish;
}
config_fish_nav(roads, r, fish) {
var nav = fish.addComponent(fish_nav);
nav.speed = Math.floor(r.min_speed + Math.random() * (r.max_speed - r.min_speed)) / 1000;
nav.speed = nav.speed * 0.3;
var revert = (r.isRevert == 1) ? true : false;
        var index = r.roadIndex % roads.length; // keep the configured index inside the valid range
        if (r.isRandom === 1) { // let the fish pick among all roads at random while swimming in the farm
            nav.walk_on_road(roads, index, revert, r.genTime);
        }
        else { // stick to one fixed road
nav.walk_on_road([roads[index]], 0, revert, r.genTime);
}
}
find_nearest_fish(ref_dst) {
var min_len = 1000000.0;
var min_fish = null;
for(var i = 0; i < this.fish_root.numChildren; i ++) {
var fish = this.fish_root.getChildAt(i);
var x_offset = ref_dst.x - fish.transform.position.x;
var y_offset = ref_dst.y - fish.transform.position.y;
            var len = x_offset * x_offset + y_offset * y_offset; // squared distance; no sqrt needed when only comparing
if (len < min_len) {
min_len = len;
min_fish = fish;
}
}
if (min_len <= 0.3 && min_fish !== null) {
var pos = min_fish.transform.position;
ref_dst.x = pos.x;
ref_dst.y = pos.y;
ref_dst.z = pos.z;
console.log(min_fish.name);
return true;
}
return false;
}
gen_fishes(roads, config_model) {
this.fish_render_queue = 1000;
for(var i = 0; i < config_model.filed_data_array.total_count; i ++) {
var r = config_model.get_record(i + 1);
var fish = this.create_fish(r);
if (!fish) {
continue;
}
// fish nav;
this.config_fish_nav(roads, r, fish);
var f_data = fish.addComponent(fish_data);
f_data.init_fish_data(r);
if (r.use_AI !== 0) {
fish.addComponent(fish_AI);
}
}
}
onEnable() {
}
onDisable() {
}
}<file_sep>export default class event_mgr extends Laya.Script {
constructor() {
super();
}
onAwake() {
if(!event_mgr.Instance) {
event_mgr.Instance = this;
}
else {
console.log("error, event_mgr has multi instances");
return;
}
        this.events_map = {}; // "event name": [{caller: xxxx, func: xxx}, {}, {}]
}
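    // Usage sketch (illustrative; "coin_change" is an event name this project
    // really dispatches, see ugame.js):
    //   event_mgr.Instance.add_listener("coin_change", this, this.on_coin_change);
    //   event_mgr.Instance.dispatch_event("coin_change", null);
    //   event_mgr.Instance.remove_listener("coin_change", this, this.on_coin_change);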
    // register a listener for an event name
add_listener(event_name, caller, func) {
if (!this.events_map[event_name]) {
this.events_map[event_name] = [];
}
var handler = { caller: caller, func: func };
this.events_map[event_name].push(handler);
}
    // remove a previously registered listener
remove_listener(event_name, caller, func) {
if (!this.events_map[event_name]) {
return;
}
for(var i = 0; i < this.events_map[event_name].length; i ++) {
var handler = this.events_map[event_name][i];
if (handler.caller == caller && handler.func == func) {
this.events_map[event_name].splice(i, 1);
// return;
i --;
}
}
}
    // dispatch an event to every registered listener
dispatch_event(event_name, udata) {
if (!this.events_map[event_name]) {
return;
}
for(var i = 0; i < this.events_map[event_name].length; i ++) {
var handler = this.events_map[event_name][i];
handler.func.call(handler.caller, event_name, udata);
}
}
}<file_sep>import res_mgr from "./res_mgr";
export default class UI_manager extends Laya.Script {
constructor() {
super();
        // More component parameter docs: https://ldc2.layabox.com/doc/?nav=zh-as-2-4-0
}
onAwake() {
if (!UI_manager.Instance) {
UI_manager.Instance = this;
}
else {
console.log("error UI_manager multi instances");
return;
}
this.ui_view = {};
}
onEnable() {
}
onDisable() {
}
show_ui(name) {
var url = "res/ui_prefabs/" + name + ".json";
var prefab = res_mgr.Instance.get_prefab_res(url);
if (!prefab) {
return;
}
var obj = prefab.create();
this.owner.addChild(obj);
        // attach the controller script: map the prefab name to its <name>_ctrl class
var cls = Laya.ClassUtils.getClass(name + "_ctrl");
if (cls) {
obj.addComponent(cls);
}
// end
this.ui_view[name] = obj;
}
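    // e.g. UI_manager.Instance.show_ui("LoginUI") loads res/ui_prefabs/LoginUI.json
    // and attaches the LoginUI_ctrl component; see the enter_*_scene methods in
    // game_app.js for the real call sites.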
remove_ui(name) {
if (this.ui_view[name]) {
this.ui_view[name].removeSelf();
this.ui_view[name] = null;
}
}
remove_all_ui() {
for(var key in this.ui_view) {
if (this.ui_view[key]) {
this.ui_view[key].removeSelf();
this.ui_view[key] = null;
}
}
}
}<file_sep>require("./UI_ctrls/uictrl_class_reg");
import game_mgr from "../managers/game_mgr";
import UI_manager from "../managers/UI_manager";
import res_mgr from "../managers/res_mgr";
import event_mgr from "../managers/event_mgr";
import fish_nav from "./fish_nav";
import farm_mgr from "./farm_mgr";
import bullet from "./bullet";
var SingleFarmWaypoint = require("./../maps/SingleFarmWaypoint");
var MultiFarmWaypoint = require("./../maps/MultiFarmWaypoint");
var ChooseFishData = require("../excels/ChooseFishData");
var FishGenData = require("../excels/FishGenData");
var MultiFishGen = require("../excels/MultiFishGen");
var ObjectType = require("./ObjectType");
var BulletConfig = require("../excels/BulletConfig");
var ugame = require("./ugame");
export default class game_app extends game_mgr {
constructor() {
super();
        // More component parameter docs: https://ldc2.layabox.com/doc/?nav=zh-as-2-4-0
}
onAwake() {
super.onAwake();
ugame.init();
if (!game_app.Instance) {
game_app.Instance = this;
}
else {
console.log("error game app multi instances");
return;
}
this.game_start();
}
game_start() {
res_mgr.Instance.preload_res_pkg({
prefabs: [
"res/ui_prefabs/LoginUI.json",
"res/ui_prefabs/ChooseUI.json",
"res/ui_prefabs/GameUI.json",
],
scene3D: [
"res/scenes3D/LayaScene_fisher_farmer/Conventional/fisher_farmer.ls",
],
sprite3D: [
"res/prefabs3D/LayaScene_fishes/Conventional/cheqiyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/denglongyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/dinianyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/fangyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/haigui.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/hetun.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/jianyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/jinqiangyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/shayu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/shayu2.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/shiziyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/tianshiyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/xiaochouyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/xiaohuangyu.lh",
"res/prefabs3D/LayaScene_fishes/Conventional/xiaolvyu.lh",
"res/prefabs3D/LayaScene_bullut/Conventional/bullet.lh",
],
atlas: [
"res/atlas/res/ui/achievements.atlas",
"res/atlas/res/ui/bullets.atlas",
"res/atlas/res/ui/button.atlas",
"res/atlas/res/ui/cannons.atlas",
"res/atlas/res/ui/choose_new.atlas",
"res/atlas/res/ui/common_icon.atlas",
"res/atlas/res/ui/common_image.atlas",
"res/atlas/res/ui/heads.atlas",
"res/atlas/res/ui/main_screen.atlas",
"res/atlas/res/ui/reward.atlas",
"res/atlas/res/ui/start.atlas",
],
sounds: [
"res/sounds/bgm_scene1.ogg",
"res/sounds/bgm_scene3.ogg",
"res/sounds/bgm_select.ogg",
"res/sounds/sfx_coin.ogg",
"res/sounds/sfx_coins.ogg",
"res/sounds/sfx_harpoon.ogg",
"res/sounds/sfx_net.ogg",
"res/sounds/sfx_levelup.ogg",
],
}, null, function() {
var scene3D = res_mgr.Instance.get_scens3d_res("res/scenes3D/LayaScene_fisher_farmer/Conventional/fisher_farmer.ls");
Laya.stage.addChild(scene3D);
scene3D.zOrder = -1;
var fish_far = scene3D.getChildByName("fish_farm");
var fish_root = fish_far.getChildByName("fish_root");
var camera = scene3D.getChildByName("Main Camera");
camera.useOcclusionCulling = false;
this.camera = camera;
this.scene3D = scene3D;
this.fish_far = fish_far;
this.farm_manger = fish_far.addComponent(farm_mgr);
this.shoot_point = fish_far.getChildByName("shoot_point");
this.fish_root = fish_root;
this.enter_logion_scene();
}.bind(this));
}
onStart() {
}
get_fish_net_pos(point) {
var screen_pos = new Laya.Vector3();
this.camera.worldToViewportPoint(point, screen_pos);
return new Laya.Vector2(screen_pos.x, screen_pos.y);
}
check_coin() {
var ulevel = ugame.ulevel;
var config = null;
if (ulevel > BulletConfig.filed_data_array.total_count) {
config = BulletConfig.get_record(BulletConfig.filed_data_array.total_count);
}
else {
config = BulletConfig.get_record(ulevel);
}
return (ugame.coin >= config.coinCost);
}
    // out_result: the caller passes in an object; this function fills in its
    //             dst and degree fields, so the caller can read them after the call.
    // returns: true when a valid shoot target was found, false when shooting is impossible.
get_shoot_point(out_result) {
        var ray = new Laya.Ray(new Laya.Vector3(0, 0, 0), new Laya.Vector3(0, 0, 1)); // ray object, filled in by viewportPointToRay below
var screen_pos = new Laya.Vector2(Laya.MouseManager.instance.mouseX, Laya.MouseManager.instance.mouseY);
        this.camera.viewportPointToRay(screen_pos, ray); // build a ray that starts at the clicked screen position and shoots perpendicular to the screen
var hit = new Laya.HitResult();
        if (!this.scene3D.physicsSimulation.rayCast(ray, hit)) { // the ray did not hit anything
return false;
}
var dst = hit.point;
if (hit.collider.owner.layer == ObjectType.BG || hit.collider.owner.layer == ObjectType.BULLET) {
dst.z = 20;
            // snap the target onto the nearest fish when one is close enough
            // (find_nearest_fish mutates dst in place and returns true on success)
            if (this.farm_manger.find_nearest_fish(dst)) {
            }
}
else {
console.log(hit.collider.owner.name);
}
var dx = dst.x - this.shoot_point.transform.position.x;
var dy = dst.y - this.shoot_point.transform.position.y;
var r = Math.atan2(dy, dx);
var degree = r * 180 / Math.PI;
        // note: Laya's coordinate system is viewed from behind, so the cannon angle differs from the intuitive one;
degree = 180 - degree;
out_result.dst = dst;
out_result.degree = degree;
return true;
}
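    // Usage sketch (illustrative; the actual caller lives in the UI layer, which
    // is not shown here):
    //   var result = {};
    //   if (this.check_coin() && this.get_shoot_point(result)) {
    //       this.shoot_bullet(result.dst, result.degree);
    //   }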
shoot_bullet(dst, degree) {
var ulevel = ugame.ulevel;
var config = null;
if (ulevel > BulletConfig.filed_data_array.total_count) {
config = BulletConfig.get_record(BulletConfig.filed_data_array.total_count);
}
else {
config = BulletConfig.get_record(ulevel);
}
var bullet_prefab = res_mgr.Instance.get_sprite3D_res("res/prefabs3D/LayaScene_bullut/Conventional/" + config.bulletName + ".lh");
var obj = Laya.Sprite3D.instantiate(bullet_prefab);
obj.layer = ObjectType.BULLET;
this.shoot_point.addChild(obj);
obj.transform.localPosition = Laya.Vector3._ZERO;
var b = obj.addComponent(bullet);
b.init_bullet(config);
b.shoot_to(dst, degree);
ugame.add_chip(-config.coinCost);
}
enter_logion_scene() {
Laya.SoundManager.stopMusic();
Laya.SoundManager.playMusic("res/sounds/bgm_scene1.ogg", 0);
this.farm_manger.remove_all_fishes();
this.farm_manger.gen_fishes(SingleFarmWaypoint.roads, ChooseFishData);
UI_manager.Instance.show_ui("LoginUI");
}
enter_choose_scene() {
Laya.SoundManager.stopMusic();
Laya.SoundManager.playMusic("res/sounds/bgm_select.ogg", 0);
UI_manager.Instance.remove_all_ui();
UI_manager.Instance.show_ui("ChooseUI");
}
enter_game_scene() {
Laya.SoundManager.stopMusic();
Laya.SoundManager.playMusic("res/sounds/bgm_scene3.ogg", 0);
this.farm_manger.remove_all_fishes();
this.farm_manger.gen_fishes(SingleFarmWaypoint.roads, FishGenData);
this.farm_manger.gen_fishes(MultiFarmWaypoint.roads, MultiFishGen);
UI_manager.Instance.remove_all_ui();
UI_manager.Instance.show_ui("GameUI");
}
onEnable() {
}
onDisable() {
}
}<file_sep>import UI_manager from "./UI_manager";
import res_mgr from "./res_mgr";
import event_mgr from "./event_mgr";
export default class game_mgr extends Laya.Script {
constructor() {
super();
}
onAwake() {
console.log("init game framwork... ...");
this.owner.addComponent(UI_manager);
this.owner.addComponent(res_mgr);
this.owner.addComponent(event_mgr);
console.log("end init game framwork");
}
onEnable() {
}
onDisable() {
}
}<file_sep>Place the CSV files you want to convert under this directory, and make sure each CSV is encoded as UTF-8.
Then edit the cs2js.bat script, fill in the input file path and the output path, and run it.
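
A quick look at how the generated modules are consumed elsewhere in this project
(these calls mirror the real ones in game_app.js and ugame.js):

    var BulletConfig = require("../excels/BulletConfig");
    var r = BulletConfig.get_record(1);                       // row whose id is "1"
    console.log(r.bulletName, r.killValue);                   // "bullet", 100
    console.log(BulletConfig.filed_data_array.total_count);   // 20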
<file_sep>export default class res_mgr extends Laya.Script {
constructor() {
super();
}
onAwake() {
if (!res_mgr.Instance) {
res_mgr.Instance = this;
}
else {
console.log("err res_mgr has multi instances");
return;
}
this.prefabs_res = {};
this.scene3D_res = {};
this.sprite3D_res = {};
this.atlas_res = {};
this.sound_res = {};
}
_one_res_load_finished() {
this.now_num ++;
if (this.on_progress) {
this.on_progress(this.now_num / this.total_num);
}
if (this.now_num >= this.total_num && this.on_load_finished) {
this.on_load_finished();
}
}
load_prefab(url) {
Laya.loader.load(url, Laya.Handler.create(this, function(json) {
var pref = new Laya.Prefab();
pref.json = json;
this.prefabs_res[url] = pref;
this._one_res_load_finished();
}));
}
reslease_prefab(url) {
if (!this.prefabs_res[url]) {
return;
}
this.prefabs_res[url].json = null;
Laya.loader.clearRes(url);
this.prefabs_res[url] = null;
}
load_atlas(url) {
Laya.loader.load(url, Laya.Handler.create(this, function(atlas) {
this.atlas_res[url] = atlas;
this._one_res_load_finished();
}));
}
reslease_atlas(url) {
if (!this.atlas_res[url]) {
return;
}
Laya.loader.clearRes(url);
this.atlas_res[url] = null;
}
load_sound(url) {
Laya.loader.load(url, Laya.Handler.create(this, function(atlas) {
this.sound_res[url] = atlas;
this._one_res_load_finished();
}));
}
reslease_sound(url) {
if (!this.sound_res[url]) {
return;
}
Laya.loader.clearRes(url);
this.sound_res[url] = null;
}
load_scene3D(url) {
Laya.Scene3D.load(url, Laya.Handler.create(this, function(scene3d) {
this.scene3D_res[url] = scene3d;
this._one_res_load_finished();
}));
}
release_scene3D(url) {
if (!this.scene3D_res[url]) {
return;
}
this.scene3D_res[url] = null;
}
load_sprite3D(url) {
Laya.Sprite3D.load(url, Laya.Handler.create(this, function(sprite3d) {
this.sprite3D_res[url] = sprite3d;
this._one_res_load_finished();
}));
}
release_sprite3D(url) {
if (!this.sprite3D_res[url]) {
return;
}
this.sprite3D_res[url] = null;
}
// res_set: {prefabs: [], scene3D: [], sprite3D:[], atlas: [], imgs: [], sounds:[]}
// on_progress(per)
// on_load_finished
preload_res_pkg(res_pkg, on_progress, on_load_finished) {
var i = 0;
var url = "";
this.on_progress = on_progress;
this.on_load_finished = on_load_finished;
this.total_num = 0;
for(var key in res_pkg) {
this.total_num += res_pkg[key].length;
}
this.now_num = 0;
if (res_pkg.prefabs) {
for(i = 0; i < res_pkg.prefabs.length; i ++) {
url = res_pkg.prefabs[i];
this.load_prefab(url);
}
}
if (res_pkg.atlas) {
for(i = 0; i < res_pkg.atlas.length; i ++) {
url = res_pkg.atlas[i];
this.load_atlas(url);
}
}
if (res_pkg.sounds) {
for(i = 0; i < res_pkg.sounds.length; i ++) {
url = res_pkg.sounds[i];
this.load_sound(url);
}
}
if (res_pkg.scene3D) {
for(i = 0; i < res_pkg.scene3D.length; i ++) {
url = res_pkg.scene3D[i];
this.load_scene3D(url);
}
}
if (res_pkg.sprite3D) {
for(i = 0; i < res_pkg.sprite3D.length; i ++) {
url = res_pkg.sprite3D[i];
this.load_sprite3D(url);
}
}
}
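    // For a real call site see game_app.game_start(), which preloads UI prefabs,
    // a 3D scene, fish/bullet sprites, atlases and sounds, then builds the scene
    // inside the completion callback.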
    release_res_pkg(res_pkg) {
        var i = 0;
        var url = ""; // i and url leaked as implicit globals before; declare them locally
        if (res_pkg.prefabs) {
for(i = 0; i < res_pkg.prefabs.length; i ++) {
url = res_pkg.prefabs[i];
this.reslease_prefab(url);
}
}
if (res_pkg.atlas) {
for(i = 0; i < res_pkg.atlas.length; i ++) {
url = res_pkg.atlas[i];
this.reslease_atlas(url);
}
}
if (res_pkg.sounds) {
for(i = 0; i < res_pkg.sounds.length; i ++) {
url = res_pkg.sounds[i];
this.reslease_sound(url);
}
}
if (res_pkg.scene3D) {
for(i = 0; i < res_pkg.scene3D.length; i ++) {
url = res_pkg.scene3D[i];
this.release_scene3D(url);
}
}
if (res_pkg.sprite3D) {
for(i = 0; i < res_pkg.sprite3D.length; i ++) {
url = res_pkg.sprite3D[i];
this.release_sprite3D(url);
}
}
}
get_prefab_res(url) {
return this.prefabs_res[url];
}
get_scens3d_res(url) {
return this.scene3D_res[url];
}
get_sprite3D_res(url) {
return this.sprite3D_res[url];
}
}<file_sep>/* MultiFishGen field descriptions: (13 fields)
id: string
fishName: string
roadIndex: int
isRevert: int
genTime: int
isRandom: int
scale: int
min_speed: int
max_speed: int
hp: int
coinValue: int
expValue: int
use_AI: int
*/
var filed_data = {
key_1: ["1", "tianshiyu", 0, 0, 0, 0, 600, 10000, 10000, 300, 500, 100, 0, ],
key_2: ["2", "tianshiyu", 1, 0, 500, 0, 700, 10000, 10000, 300, 500, 100, 0, ],
key_3: ["3", "tianshiyu", 2, 0, 500, 0, 800, 10000, 10000, 300, 500, 100, 0, ],
key_4: ["4", "cheqiyu", 3, 0, 0, 0, 1000, 10000, 10000, 300, 500, 100, 0, ],
key_5: ["<KEY>", 3, 0, 500, 0, 900, 10000, 10000, 300, 500, 100, 0, ],
key_6: ["6", "cheqiyu", 3, 0, 1000, 0, 800, 10000, 10000, 300, 500, 100, 0, ],
key_7: ["7", "cheqiyu", 3, 0, 1500, 0, 700, 10000, 10000, 300, 500, 100, 0, ],
total_count: 7
};
function get_record(id) {
var key = "key_" + id;
var record_array = filed_data[key];
if(!record_array) {
return null;
}
var record = {
id: record_array[0],
fishName: record_array[1],
roadIndex: record_array[2],
isRevert: record_array[3],
genTime: record_array[4],
isRandom: record_array[5],
scale: record_array[6],
min_speed: record_array[7],
max_speed: record_array[8],
hp: record_array[9],
coinValue: record_array[10],
expValue: record_array[11],
use_AI: record_array[12],
};
return record;
}
var MultiFishGen = {
filed_data_array: filed_data,
get_record: get_record,
};
module.exports = MultiFishGen;<file_sep>import UI_ctrl from "../../managers/UI_ctrl";
import game_app from "../game_app";
var ugame = require("../ugame");
export default class ChooseUI_ctrl extends UI_ctrl {
constructor() {
super();
        // More component parameter docs: https://ldc2.layabox.com/doc/?nav=zh-as-2-4-0
}
onAwake() {
super.onAwake();
this.view["center/start_button"].on(Laya.Event.CLICK, this, this.on_start_click);
this.view["coin/label"].text = "" + ugame.coin;
this.view["damon/label"].text = "" + ugame.damon;
}
onStart() {
}
on_start_click(){
game_app.Instance.enter_game_scene();
}
onEnable() {
}
onDisable() {
}
}<file_sep>import event_mgr from "../managers/event_mgr";
var LevelConfig = require("../excels/LevelConfig");
var ugame = {
coin: 0,
damon: 0,
exp: 0,
ulevel: 0,
init() {
var str = Laya.LocalStorage.getItem("is_saved");
var is_saved = 0;
if (str) {
is_saved = parseInt(str);
}
if (is_saved === 0) {
this.coin = 8000;
this.damon = 10;
this.exp = 0;
Laya.LocalStorage.setItem("is_saved", "1");
Laya.LocalStorage.setItem("coin", "" + this.coin);
Laya.LocalStorage.setItem("damon", "" + this.damon);
Laya.LocalStorage.setItem("exp", "" + this.exp);
}
else {
str = Laya.LocalStorage.getItem("coin");
this.coin = parseInt(str);
str = Laya.LocalStorage.getItem("damon");
this.damon = parseInt(str);
str = Laya.LocalStorage.getItem("exp");
this.exp = parseInt(str);
}
this.ulevel = this.get_level(this.exp);
},
get_level(exp) {
var level = 0;
for(var i = 0; i < LevelConfig.filed_data_array.total_count; i ++) {
if(LevelConfig.get_record(i + 1).exp > exp) {
break;
}
level = i + 1;
}
return level;
},
add_chip(chip) { // chip < 0;
this.coin += chip;
if (this.coin < 0) {
this.coin = 0;
}
Laya.LocalStorage.setItem("coin", "" + this.coin);
// 金币变换了以后你要抛事件
event_mgr.Instance.dispatch_event("coin_change", null);
},
add_damon(damon) { // chip < 0;
this.damon += damon;
if (this.damon < 0) {
this.damon = 0;
}
Laya.LocalStorage.setItem("damon", "" + this.damon);
event_mgr.Instance.dispatch_event("damon_change", null);
},
add_exp(exp) {
this.exp += exp;
Laya.LocalStorage.setItem("exp", "" + this.exp);
event_mgr.Instance.dispatch_event("exp_change", null);
// 经验增加了有可能触发升级;
var prev_level = this.ulevel;
this.ulevel = this.get_level(this.exp);
if (prev_level != this.ulevel) {
event_mgr.Instance.dispatch_event("lv_ugrade", null);
}
},
get_avator_icon() {
var r = LevelConfig.get_record(this.ulevel);
return "res/ui/heads/" + r.headName + ".png";
},
get_cannon_icon() {
var r = LevelConfig.get_record(this.ulevel);
return "res/ui/cannons/" + r.cannonIconName + ".png";
},
};
module.exports = ugame;<file_sep>export default class UI_Button extends Laya.Script {
/** @prop {name:normal_img, tips:"按钮弹起的状态", accept:res, default:null}*/
/** @prop {name:press_img, tips:"按钮按下的状态", accept:res, default:null}*/
/** @prop {name:disable_img, tips:"按钮禁用的状态", accept:res, default:null}*/
/** @prop {name:is_disable, tips:"是否被禁用", type:Bool, default:false}*/
constructor() {
super();
this.normal_img = null;
this.press_img = null;
this.disable_img = null;
this.is_disable = false;
// 更多参数说明请访问: https://ldc2.layabox.com/doc/?nav=zh-as-2-4-0
}
onAwake() {
this.owner.on(Laya.Event.CLICK, this, this.on_click);
this.set_disable(this.is_disable);
}
on_click(event) {
event.stopPropagation();
}
onMouseDown() {
if (this.is_disable) {
return;
}
if (this.press_img !== null) {
this.owner.skin = this.press_img;
}
}
onMouseUp() {
if (this.is_disable) {
return;
}
if (this.normal_img !== null) {
this.owner.skin = this.normal_img;
}
}
set_disable(is_disable) {
if (this.is_disable == is_disable) {
return;
}
this.is_disable = is_disable;
if (this.is_disable) {
if (this.disable_img) {
this.owner.skin = this.disable_img;
}
}
else {
if (this.normal_img) {
this.owner.skin = this.normal_img;
}
}
}
onEnable() {
}
onDisable() {
}
}<file_sep>import fish_nav from "./fish_nav";
var ugame = require("./ugame");
export default class fish_data extends Laya.Script {
constructor() {
super();
this.config = null;
this.hp = 0;
}
onAwake() {
this.nav = this.owner.getComponent(fish_nav);
}
init_fish_data(config) {
this.config = config;
this.hp = this.config.hp;
}
on_kill(kill_value) {
this.hp -= kill_value;
if (this.hp <= 0) { // 鱼挂了
//
Laya.SoundManager.playSound("res/sounds/sfx_coin.ogg");
ugame.add_chip(this.config.coinValue);
ugame.add_exp(this.config.expValue);
//
this.reset();
}
}
reset() {
this.hp = this.config.hp;
var delay_ms = 5000 + Math.random() * 5000; // [5000, 10000]
this.nav.reset(delay_ms);
}
}<file_sep>var ObjectType = {
BG: 8,
BULLET: 9,
FISH: 10,
};
module.exports = ObjectType;<file_sep>
export default class fish_nav extends Laya.Script3D {
constructor() {
super();
}
onAwake() {
this.is_revert = false; // false: 0, next_step ++, true: 最后一个点, next_step --;
this.is_walking = false;
this.speed = 5;
}
reset(delay_ms) {
this.is_walking = false;
var index = Math.random() * this.roads.length;
index = Math.floor(index);
this.is_revert = !this.is_revert;
this.walk_on_road(this.roads, index, this.is_revert, delay_ms);
}
walk_on_road(roads, index, revert, delay_ms) {
this.is_walking = false;
if (index < 0 || index >= roads.length) {
return;
}
this.roads = roads;
this.road_data = roads[index];
this.is_revert = revert;
if (this.road_data.length < 2) {
return;
}
if (this.is_revert) {
var last_index = this.road_data.length - 1;
this.owner.transform.localPosition = new Laya.Vector3(this.road_data[last_index].x + 1500, this.road_data[last_index].y, this.road_data[last_index].z);
this.next_step = last_index - 1;
}
else {
this.owner.transform.localPosition = new Laya.Vector3(this.road_data[0].x + 1500, this.road_data[0].y, this.road_data[0].z);
this.next_step = 1;
}
if (delay_ms > 0) {
Laya.timer.once(delay_ms, this, this.walk_to_next);
}
else {
this.walk_to_next();
}
}
walk_to_next() {
if (this.next_step >= this.road_data.length || this.next_step < 0) {
this.is_walking = false;
var index = Math.random() * this.roads.length;
index = Math.floor(index);
this.is_revert = !this.is_revert;
this.walk_on_road(this.roads, index, this.is_revert, 0);
return;
}
var src = this.owner.transform.localPosition;
var dst = new Laya.Vector3(this.road_data[this.next_step].x + 1500, this.road_data[this.next_step].y, this.road_data[this.next_step].z);
var dir = new Laya.Vector3();
Laya.Vector3.subtract(dst, src, dir);
var len = Laya.Vector3.scalarLength(dir);
if (len <= 0) {
if (this.is_revert) {
this.next_step --;
}
else {
this.next_step ++;
}
this.walk_to_next();
return;
}
this.walk_time = len / this.speed;
this.passed_time = 0;
this.vx = this.speed * (dir.x / len);
this.vy = this.speed * (dir.y / len);
this.vz = this.speed * (dir.z / len);
this.is_walking = true;
// 鱼头的朝向 --->transfrom LookAt;
var rot = this.owner.transform.localRotation;
this.owner.transform.lookAt(dst, new Laya.Vector3(0, 1, 0), true);
this.dst_rot = this.owner.transform.localRotation;
this.owner.transform.localRotation = rot;
}
onUpdate() {
if (this.is_walking === false) {
return;
}
var dt = Laya.timer.delta / 1000;
this.passed_time += dt;
if (this.passed_time > this.walk_time) {
dt -= (this.passed_time - this.walk_time);
}
var pos = this.owner.transform.localPosition;
pos.x += (this.vx * dt);
pos.y += (this.vy * dt);
pos.z += (this.vz * dt);
this.owner.transform.localPosition = pos;
// 旋转插值;
var new_rot = new Laya.Quaternion();
Laya.Quaternion.slerp(this.dst_rot, this.owner.transform.localRotation, 2 * dt, new_rot);
this.owner.transform.localRotation = new_rot;
// end
if (this.passed_time >= this.walk_time) {
if (this.is_revert) {
this.next_step --;
}
else {
this.next_step ++;
}
this.walk_to_next();
}
}
onEnable() {
}
onDisable() {
}
} | 4cc0ee721340c4a99be7bf48ca089c5ee2541ee2 | [
"Text",
"JavaScript"
] | 16 | Text | guaishou-com/fish | 87bb498a6ad96b8e41d16710341071bf434257f4 | 974ef784a1a5a44cd0730052ad42941e38ecf01d |
refs/heads/main | <repo_name>SaqlainAveiro/SaqlainAveiro.github<file_sep>/README.md
# SaqlainAveiro.github.io | 55cd4226a0eb1ca6edc3e8146771a5590988e677 | [
"Markdown"
] | 1 | Markdown | SaqlainAveiro/SaqlainAveiro.github | c3d9d277e5c252660a81c5632ca3d87d5ab42c23 | a6fd51c4901818f764391b9280ffa6e45b7b745c |
refs/heads/master | <repo_name>grondag/brocade-connect<file_sep>/README.md
# Brocade Connections
## This is now a [sub-project of Brocade](https://github.com/grondag/brocade/tree/master/brocade-connect). This repo will eventually be archived.
This is a library for Minecraft mods using the Fabric toolchain. It is meant to be distributed as a nested jar and introduces no additional dependencies.
The library functions are relative to a central block and the 26 neighbor blocks immediately adjacent to it. For the "block neighborhood" the library offers:
1) Declarations to describe the location and orientation of neighboring blocks relative to the central block.
2) Cached and lazily-evaluated access to block state and optionally, results of a provided state function and test function.
3) Fast computation of "join" state for connected textures and shapes, either simple (six adjacent blocks) or complex (dependent on up to all twenty-six neighbors.)
4) Low- or no-allocation operation via threadLocal and pooled instances.
## Development Environment
```
repositories {
maven {
name = "grondag"
url = "https://grondag-repo.appspot.com"
credentials {
username "guest"
password ""
}
}
}
dependencies {
// recommended but not required
compile "org.apiguardian:apiguardian-api:1.0.0"
compile "com.google.code.findbugs:jsr305:3.0.2"
modCompile "grondag:brocade-connect:1.0.+"
include "grondag:brocade-connect:1.0.+"
}
```
## Sample of Usage
Obtaining the state needed for a connected-texture model is straightforward. If the connection depends only on block state, the test function is simply this:
```java
static final BlockTest MATCHER = (b0, m0, b1, m1) -> b0 != null && b0.equals(b1);
```
Retrieving the connection state is then done with a single call:
```java
CornerJoinState joinState = CornerJoinState.fromWorld(BlockNeighbors.threadLocal(blockView, pos, MATCHER));
```
Using the state to texture or transform models is not in the scope of this library. Future Brocade libraries will provide those features but a crude working example can be found in https://github.com/grondag/smart_chest.
| 6e338da315044afd16b12b8baf9a0c144eb90655 | [
"Markdown"
] | 1 | Markdown | grondag/brocade-connect | b72062aee93c8802a209c258a8daac926ab8c1ac | af46956a88676be644aff4f8e3f640378ef73c9e |
refs/heads/master | <file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.shared;
import com.google.web.bindery.requestfactory.shared.RequestFactory;
public interface ExampleRequestFactory extends RequestFactory {
MusicRequest music();
FolderRequest folder();
PostRequest post();
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.client.model;
import java.util.Date;
import com.sencha.gxt.core.client.ValueProvider;
import com.sencha.gxt.data.shared.ModelKeyProvider;
import com.sencha.gxt.data.shared.PropertyAccess;
public interface PostProperties extends PropertyAccess<Post> {
ValueProvider<Post, Date> date();
ValueProvider<Post, String> forum();
ModelKeyProvider<Post> id();
ValueProvider<Post, String> subject();
ValueProvider<Post, String> username();
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.model;
import com.google.gwt.editor.client.Editor.Path;
import com.sencha.gxt.core.client.ValueProvider;
import com.sencha.gxt.data.shared.ModelKeyProvider;
import com.sencha.gxt.data.shared.PropertyAccess;
public class NamedModel {
public interface NamedModelProperties extends PropertyAccess<NamedModel> {
@Path("name")
ModelKeyProvider<NamedModel> kp();
ValueProvider<NamedModel, String> name();
}
private String name;
protected NamedModel(String name) {
this.name = name;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.tabs;
import com.google.gwt.core.client.EntryPoint;
import com.google.gwt.core.client.GWT;
import com.google.gwt.uibinder.client.UiBinder;
import com.google.gwt.uibinder.client.UiField;
import com.google.gwt.uibinder.client.UiHandler;
import com.google.gwt.user.client.ui.IsWidget;
import com.google.gwt.user.client.ui.RootPanel;
import com.google.gwt.user.client.ui.Widget;
import com.sencha.gxt.examples.resources.client.TestData;
import com.sencha.gxt.explorer.client.model.Example.Detail;
import com.sencha.gxt.widget.core.client.TabItemConfig;
import com.sencha.gxt.widget.core.client.TabPanel;
import com.sencha.gxt.widget.core.client.event.SelectionEvent;
import com.sencha.gxt.widget.core.client.info.Info;
@Detail(name = "Basic Tabs (UiBinder)", icon = "basictabs", category = "Tabs", files = "BasicTabUiBinderExample.ui.xml")
public class BasicTabUiBinderExample implements IsWidget, EntryPoint {
interface MyUiBinder extends UiBinder<Widget, BasicTabUiBinderExample> {
}
private static MyUiBinder uiBinder = GWT.create(MyUiBinder.class);
@UiField(provided = true)
String txt = TestData.DUMMY_TEXT_SHORT;
public Widget asWidget() {
return uiBinder.createAndBindUi(this);
}
public void onModuleLoad() {
RootPanel.get().add(asWidget());
}
@UiHandler(value = {"folder", "panel"})
void onSelection(SelectionEvent<Widget> event) {
TabPanel panel = (TabPanel) event.getSource();
Widget w = event.getItem();
TabItemConfig config = panel.getConfig(w);
Info.display("Message", "'" + config.getText() + "' Selected");
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.app.place;
import com.google.gwt.place.shared.Place;
import com.google.gwt.place.shared.PlaceTokenizer;
public class ExamplePlace extends Place {
public static class Tokenizer implements PlaceTokenizer<ExamplePlace> {
@Override
public ExamplePlace getPlace(String token) {
return new ExamplePlace(token);
}
@Override
public String getToken(ExamplePlace place) {
return place.getExampleId().toString();
}
}
private String exampleId;
public ExamplePlace(String exampleId) {
this.exampleId = exampleId;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof ExamplePlace) {
return exampleId.equals(((ExamplePlace) obj).exampleId);
}
return false;
}
public String getExampleId() {
return exampleId;
}
@Override
public int hashCode() {
return exampleId.hashCode();
}
}
<file_sep>.x-html-editor-wrap {
border:1px solid;
}
.x-html-editor-tb .x-btn-text {
background:transparent no-repeat;
}
.x-html-editor-tip .x-tip-bd .x-tip-bd-inner {
padding:5px;
padding-bottom:1px;
}
.x-html-editor-tb .x-toolbar {
position:static !important;
}<file_sep>.x-box-tl {
background: transparent no-repeat 0 0;
zoom:1;
}
.x-box-tc {
height: 8px;
background: transparent repeat-x 0 0;
overflow: hidden;
}
.x-box-tr {
background: transparent no-repeat right -8px;
}
.x-box-ml {
background: transparent repeat-y 0;
padding-left: 4px;
overflow: hidden;
zoom:1;
}
.x-box-mc {
background: repeat-x 0 -16px;
padding: 4px 10px;
}
.x-box-mc h3 {
margin: 0 0 4px 0;
zoom:1;
}
.x-box-mr {
background: transparent repeat-y right;
padding-right: 4px;
overflow: hidden;
}
.x-box-bl {
background: transparent no-repeat 0 -16px;
zoom:1;
}
.x-box-bc {
background: transparent repeat-x 0 -8px;
height: 8px;
overflow: hidden;
}
.x-box-br {
background: transparent no-repeat right -24px;
}
.x-box-tl, .x-box-bl {
padding-left: 8px;
overflow: hidden;
}
.x-box-tr, .x-box-br {
padding-right: 8px;
overflow: hidden;
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.test.client;
import com.google.gwt.core.client.EntryPoint;
import com.google.gwt.user.client.ui.RootPanel;
import com.sencha.gxt.widget.core.client.form.FileUploadField;
public class FileUploadFieldTest implements EntryPoint {
@Override
public void onModuleLoad() {
FileUploadField field = new FileUploadField();
field.setWidth(250);
RootPanel.get().add(field);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.test.client;
import com.google.gwt.core.client.EntryPoint;
import com.google.gwt.user.client.ui.HTML;
import com.google.gwt.user.client.ui.RootPanel;
import com.sencha.gxt.core.client.resources.ThemeStyles;
import com.sencha.gxt.widget.core.client.container.CssFloatLayoutContainer;
import com.sencha.gxt.widget.core.client.container.CssFloatLayoutContainer.CssFloatData;
public class CssFloatLayoutContainerTest implements EntryPoint {
@Override
public void onModuleLoad() {
CssFloatLayoutContainer con = new CssFloatLayoutContainer();
con.setBorders(true);
con.setPixelSize(400, 400);
HTML html = new HTML("one");
html.addStyleName(ThemeStyles.getStyle().border());
con.add(html, new CssFloatData(.5));
html = new HTML("two");
html.addStyleName(ThemeStyles.getStyle().border());
con.add(html, new CssFloatData(.5));
RootPanel.get().add(con);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client;
import com.google.gwt.core.client.EntryPoint;
import com.google.gwt.core.client.GWT;
import com.sencha.gxt.core.client.GXT;
import com.sencha.gxt.explorer.client.app.ioc.ExplorerGinjector;
import com.sencha.gxt.state.client.CookieProvider;
import com.sencha.gxt.state.client.StateManager;
public class Explorer implements EntryPoint {
private final ExplorerGinjector injector = GWT.create(ExplorerGinjector.class);
@Override
public void onModuleLoad() {
StateManager.get().setProvider(new CookieProvider("/", null, null, GXT.isSecure()));
ExplorerApp app = injector.getApp();
app.run();
onReady();
}
private native void onReady() /*-{
if (typeof $wnd.GxtReady != 'undefined') {
$wnd.GxtReady();
}
}-*/;
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.shared;
import com.google.web.bindery.requestfactory.shared.EntityProxy;
import com.google.web.bindery.requestfactory.shared.EntityProxyId;
public interface NamedProxy extends EntityProxy {
Integer getId();
String getName();
@Override
public EntityProxyId<? extends NamedProxy> stableId();
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.app.mvp;
import com.google.gwt.place.shared.PlaceHistoryMapper;
import com.google.gwt.place.shared.WithTokenizers;
import com.sencha.gxt.explorer.client.app.place.ExamplePlace;
@WithTokenizers({ExamplePlace.Tokenizer.class})
public interface ExplorerPlaceHistoryMapper extends PlaceHistoryMapper {
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.client.model;
public class FolderModel extends FileModel {
protected FolderModel() {
}
public FolderModel(String name, String path) {
super(name, path);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.client.model;
import java.util.List;
@SuppressWarnings("serial")
public class FolderDto extends BaseDto {
private List<BaseDto> children;
protected FolderDto() {
}
public FolderDto(Integer id, String name) {
super(id, name);
}
public List<BaseDto> getChildren() {
return children;
}
public void setChildren(List<BaseDto> children) {
this.children = children;
}
public void addChild(BaseDto child) {
getChildren().add(child);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.test.client;
import com.google.gwt.core.client.EntryPoint;
import com.google.gwt.user.client.ui.RootPanel;
import com.sencha.gxt.widget.core.client.FramedPanel;
import com.sencha.gxt.widget.core.client.button.TextButton;
import com.sencha.gxt.widget.core.client.toolbar.ToolBar;
public class ContentPanelTest implements EntryPoint {
@Override
public void onModuleLoad() {
FramedPanel panel = new FramedPanel();
panel.setHeadingText("ContentPanel");
panel.setPixelSize(400, 400);
panel.setCollapsible(true);
panel.setAnimCollapse(true);
panel.setPagePosition(200, 200);
panel.setBodyBorder(false);
// panel.collapse();
ToolBar bar = new ToolBar();
bar.add(new TextButton("Foo"));
RootPanel.get().add(panel);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.test.client;
import com.google.gwt.core.client.EntryPoint;
import com.google.gwt.user.client.Timer;
import com.google.gwt.user.client.ui.RootPanel;
import com.sencha.gxt.widget.core.client.ProgressBar;
import com.sencha.gxt.widget.core.client.info.Info;
public class ProgressBarTest implements EntryPoint {
@Override
public void onModuleLoad() {
final ProgressBar bar = new ProgressBar();
bar.setWidth(400);
final Timer t = new Timer() {
float i;
@Override
public void run() {
bar.updateProgress(i / 100, (int) i + "% Complete");
i += 5;
if (i > 105) {
cancel();
Info.display("Message", "Items were loaded");
}
}
};
t.scheduleRepeating(500);
RootPanel.get().add(bar);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.server;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import com.sencha.gxt.data.shared.SortInfo;
import com.sencha.gxt.data.shared.SortInfoBean;
import com.sencha.gxt.data.shared.loader.PagingLoadConfigBean;
import com.sencha.gxt.data.shared.loader.PagingLoadResult;
import com.sencha.gxt.examples.resources.client.model.Post;
import com.sencha.gxt.examples.resources.shared.PostRequest.PostPagingLoadResultBean;
public class PostService {
private List<Post> posts;
public PostService() {
posts = new ArrayList<Post>();
SimpleDateFormat sf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
try {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document doc = db.parse(getClass().getResourceAsStream("posts.xml"));
doc.getDocumentElement().normalize();
NodeList nodeList = doc.getElementsByTagName("row");
for (int s = 0; s < nodeList.getLength(); s++) {
Node fstNode = nodeList.item(s);
if (fstNode.getNodeType() == Node.ELEMENT_NODE) {
Element fstElmnt = (Element) fstNode;
NodeList fields = fstElmnt.getElementsByTagName("field");
Post p = new Post();
p.setForum(getValue(fields, 0));
p.setDate(sf.parse(getValue(fields, 1)));
p.setSubject(getValue(fields, 2));
p.setUsername(getValue(fields, 4));
posts.add(p);
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
PagingLoadResult<Post> getPosts(PagingLoadConfigBean config) {
return getPosts(config.getOffset(), config.getLimit(), config.getSortInfo());
}
public PostPagingLoadResultBean getPosts(int offset, int limit, List<SortInfoBean> sortInfo) {
List<Post> posts = new ArrayList<Post>(this.posts);
if (sortInfo.size() > 0) {
SortInfo sort = sortInfo.get(0);
if (sort.getSortField() != null) {
final String sortField = sort.getSortField();
if (sortField != null) {
Collections.sort(posts, sort.getSortDir().comparator(new Comparator<Post>() {
public int compare(Post p1, Post p2) {
if (sortField.equals("forum")) {
return p1.getForum().compareTo(p2.getForum());
} else if (sortField.equals("username")) {
return p1.getUsername().compareTo(p2.getUsername());
} else if (sortField.equals("subject")) {
return p1.getSubject().compareTo(p2.getSubject());
} else if (sortField.equals("date")) {
return p1.getDate().compareTo(p2.getDate());
}
return 0;
}
}));
}
}
}
ArrayList<Post> sublist = new ArrayList<Post>();
int start = offset;
int actualLimit = posts.size();
if (limit > 0) {
actualLimit = Math.min(start + limit, actualLimit);
}
for (int i = offset; i < actualLimit; i++) {
sublist.add(posts.get(i));
}
return new PostPagingLoadResultBean(sublist, posts.size(), offset);
}
private String getValue(NodeList fields, int index) {
NodeList list = fields.item(index).getChildNodes();
if (list.getLength() > 0) {
return list.item(0).getNodeValue();
} else {
return "";
}
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.app.activity;
import com.google.gwt.activity.shared.AbstractActivity;
import com.google.gwt.event.shared.EventBus;
import com.google.gwt.place.shared.PlaceController;
import com.google.gwt.user.client.ui.AcceptsOneWidget;
import com.google.inject.Inject;
public class OverviewActivity extends AbstractActivity {
@Inject
PlaceController placeController;
@Override
public void start(AcceptsOneWidget panel, EventBus eventBus) {
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.app.ui;
import com.google.gwt.user.client.ui.IsWidget;
import com.sencha.gxt.explorer.client.model.Example;
public interface ExampleDetailView extends IsWidget {
public interface Presenter {
void selectExample(Example ex);
}
void setPresenter(Presenter listener);
/**
* Focuses on the given example
*
* @param example
*/
void showExample(Example example);
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.server;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
public class DataLoader implements ServletContextListener {
@Override
public void contextDestroyed(ServletContextEvent event) {
}
@Override
public void contextInitialized(ServletContextEvent event) {
MusicDataLoader.initMusic(event);
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.app.ioc;
import com.google.gwt.inject.client.GinModules;
import com.google.gwt.inject.client.Ginjector;
import com.sencha.gxt.explorer.client.ExplorerApp;
@GinModules(ExplorerModule.class)
public interface ExplorerGinjector extends Ginjector {
ExplorerApp getApp();
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.client.model;
import com.sencha.gxt.data.shared.ModelKeyProvider;
public class NameImageModel {
public static ModelKeyProvider<NameImageModel> KP = new ModelKeyProvider<NameImageModel>() {
@Override
public String getKey(NameImageModel item) {
return item.getName();
}
};
private String name;
private String image;
public String getImage() {
return image;
}
public NameImageModel(String name, String image) {
this.name = name;
this.image = image;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.client.model;
public interface BrowserProxy {
public double getIE();
public String getDate();
public double getFirefox();
public double getChrome();
public double getSafari();
public double getOpera();
public double getOther();
public void setIE(double IE);
public void setDate(String date);
public void setFirefox(double Firefox);
public void setChrome(double Chrome);
public void setSafari(double Safari);
public void setOpera(double Opera);
public void setOther(double Other);
}
<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
.text {
font-size: 11px;
}
.textLarge {
font-size: 14px;
}
.paddedText {
font-size: 11px;
padding: 5px;
}<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.explorer.client.grid;
import com.sencha.gxt.examples.resources.client.model.Plant;
import com.sencha.gxt.explorer.client.model.Example.Detail;
import com.sencha.gxt.widget.core.client.grid.Grid;
import com.sencha.gxt.widget.core.client.grid.editing.GridEditing;
import com.sencha.gxt.widget.core.client.grid.editing.GridRowEditing;
@Detail(name = "Row Editable Grid", icon = "roweditorgrid", category = "Grid", classes = {AbstractGridEditingExample.class, Plant.class})
public class RowEditingGridExample extends AbstractGridEditingExample {
@Override
protected GridEditing<Plant> createGridEditing(Grid<Plant> editableGrid) {
return new GridRowEditing<Plant>(editableGrid);
}
}<file_sep>/**
* Ext GWT 3.0.0-rc - Ext for GWT
* Copyright(c) 2007-2011, Sencha, Inc.
* <EMAIL>
*
* http://sencha.com/license
*/
package com.sencha.gxt.examples.resources.client.model;
public interface TaskProxy {
public void setId(int id);
public int getId();
public void setProject(String project);
public String getProject();
public void setTaskId(int taskId);
public int getTaskId();
public void setDescription(String description);
public String getDescription();
public void setEstimate(double estimate);
public double getEstimate();
public void setRate(double rate);
public double getRate();
public void setDue(String due);
public String getDue();
}
| 84c55a81cd1aa27b6bad990c53912262d7f375a1 | [
"Java",
"CSS"
] | 26 | Java | kerbymart/explorer | b95968f3a6e8837a5155017fead60d9c9f79a950 | bc47e3c2f7cf1b83f7e71979b190e2dba76e9fed |
refs/heads/master | <repo_name>svineet/monte-carlo-algo-analyser<file_sep>/README.md
Monte Carlo Algorithm Complexity Analyser
===
Trying to analyse average time complexity of algorithms via Monte Carlo + stats
[Associated blog post](https://medium.com/@svineet/analysing-the-covid-19-binary-search-testing-algorithm-via-monte-carlo-method-part-1-951049cdb212)
Usage
---
Open the notebook, you will find follow along explanations. Just add your algorithm, and a function that generates the required
input space.
<file_sep>/requirements.txt
pandas
numpy
matplotlib
dask
| c8af37a14b573a4684c63a66a702ba3dd46e4c6d | [
"Markdown",
"Text"
] | 2 | Markdown | svineet/monte-carlo-algo-analyser | c961630caee5286c8176ef88d794c145d4aaee9a | 2cd170c8c816888f96e4e6620027ff4cf9976584 |
refs/heads/master | <repo_name>mottosso/iscompatible<file_sep>/.coveragerc
[run]
source = iscompatible
[report]
include = *iscompatible*<file_sep>/docs/conf.py
# -*- coding: utf-8 -*-
import sys
import os
import sphinx
src_path = os.path.abspath('..')
if not src_path in sys.path:
sys.path.insert(0, src_path)
import iscompatible
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
]
if sphinx.version_info >= (1, 3):
extensions.append('sphinx.ext.napoleon')
else:
extensions.append('sphinxcontrib.napoleon')
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'iscompatible'
copyright = u'2014, <NAME>'
version = iscompatible.__version__
release = version
exclude_patterns = []
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
if os.environ.get('READTHEDOCS', None) != 'True':
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
html_static_path = ['_static']
htmlhelp_basename = 'iscompatibledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [
('index', 'iscompatible.tex', u'iscompatible Documentation',
u'<NAME>', 'manual'),
]
man_pages = [
('index', 'iscompatible', u'iscompatible Documentation',
[u'<NAME>'], 1)
]
texinfo_documents = [
('index', 'iscompatible', u'iscompatible Documentation',
u'<NAME>', 'iscompatible', 'Quality Assurance for Content',
'Miscellaneous'),
]
<file_sep>/docs/index.rst
.. automodule:: iscompatible
:members:
<file_sep>/README.md
### Python versioning with requirements.txt syntax
[![Build Status][travis]][travis_repo]
[![PyPI version][pypi]][pypi_repo]
[![Coverage Status][cover]][cover_repo]
iscompatible gives you the power of the [pip requirements.txt][req]
syntax for everyday python packages, modules, classes or arbitrary
functions. Supports Python 2.6-2.7 and Python 3.2-3.3, licensed under MIT.
[req]: https://pip.readthedocs.org/en/1.1/requirements.html
- [Documentation][]
- [Issue tracker][]
- [Wiki][]
[Wiki]: https://github.com/mottosso/iscompatible/wiki
[Issue tracker]: https://github.com/mottosso/iscompatible/issues
[Documentation]: http://iscompatible.readthedocs.org
<br>
### Install
iscompatible can be found via PyPI.
```bash
$ pip install iscompatible
```
<br>
### Usage
The module contains a single function called `iscompatible`.
```python
>>> from iscompatible import iscompatible
>>> iscompatible("foo>=5", (5, 6, 1))
True
>>> iscompatible("foo>=5.6.1, <5.7", (5, 0, 0))
False
>>> MyPlugin = type("MyPlugin", (), {'version': (5, 6, 1)})
>>> iscompatible("foo==5.6.1", MyPlugin.version)
True
```
<br>
### Example
The requirements.txt syntax allows you to specify inexact matches
between a set of requirements and a version. For example, let's
assume that the single package foo-5.6.1 exists on disk. The
following requirements are all compatible with foo-5.6.1.
|Requirement | Description
|------------|--------------------------------------------------
|foo |any version of foo
|foo>=5 |any version of foo, above or equal to 5
|foo>=5.6 |any version of foo, above or equal to 5.6
|foo==5.6.1 |exact match
|foo>5 |foo-5 or greater, including minor and patch
|foo>5, <5.7 |foo-5 or greater, but less than foo-5.7
|foo>0, <5.7 |any foo version less than foo-5.7
[travis]: https://travis-ci.org/mottosso/iscompatible.svg?branch=master
[travis_repo]: https://travis-ci.org/mottosso/iscompatible
[pypi]: https://badge.fury.io/py/iscompatible.svg
[pypi_repo]: http://badge.fury.io/py/iscompatible
[cover]: https://coveralls.io/repos/mottosso/iscompatible/badge.png?branch=master
[cover_repo]: https://coveralls.io/r/mottosso/iscompatible?branch=master
<file_sep>/README.txt
Python versioning with requirements.txt syntax
==============================================
iscompatible gives you the power of the pip requirements.txt
syntax for everyday python packages, modules, classes or arbitrary
functions. Supports Python 2.6+ and Python 3.1+, licensed under MIT.
See `the GitHub repository`_ for more information.
.. _the GitHub repository: https://github.com/mottosso/iscompatible<file_sep>/setup.py
from setuptools import setup
with open('README.txt') as f:
readme = f.read()
import os
import imp
mod_path = os.path.abspath('iscompatible.py')
mod = imp.load_source('iscompatible', mod_path)
version = mod.__version__
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
]
setup(
name='iscompatible',
version=version,
description='Python versioning with requirements.txt syntax',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mottosso/iscompatible',
license="MIT",
py_modules=["iscompatible"],
zip_safe=False,
classifiers=classifiers
)
| 426f3e0ad71b02ef92a7110bf968be2a2cc20f11 | [
"reStructuredText",
"INI",
"Markdown",
"Python",
"Text"
] | 6 | reStructuredText | mottosso/iscompatible | b76397fd3616b538b15277b786d99c8f4fae8e3f | 83aa3733eee69021b2bfa2cdd0f89b5ecffe805f |
refs/heads/master | <file_sep><?php
class Model{
public function model3dInfo()
{
return array(
'name1' => 'coke',
'description1' => 'Coca Cola X3D Model',
'path1' => 'coke_top_tn',
'name2' => 'Coke Can 3D Image 2',
'description2' => 'Sprite X3D Model',
'path2' => 'coke_top_tn2',
'name3' => 'Sprite Bottle 3D Image 1',
'description3' => 'Dr Pepper X3D Model',
'path3' => 'coke_top_tn3',
);
}
}
?>
| ed4b6542ddb58423a676d53e4204af0505d474b7 | [
"PHP"
] | 1 | PHP | koreex/x3d-cocacola | b3d9b2bac27fd44d3c8a1661cb410a6c78f9729e | cd1e72b49245fe1c5683dfe56f3c9b4449fdf1fb |
refs/heads/master | <repo_name>leosaysger/git_mirror<file_sep>/README.md
# git_mirror
This is a mirror of commits
| 70110963b8fc96970ef6752e3b1515b25d3a4928 | [
"Markdown"
] | 1 | Markdown | leosaysger/git_mirror | 0c1b52203cd0826f3b6c5e023baa171e62fc3866 | 4dc128eb932ba9ea20ad544a77ce89621c134b40 |
refs/heads/master | <file_sep>var mongoose = require('mongoose');
var Schema = mongoose.Schema;
var term = new Schema({
title: String,
definition: String,
exampleText: String,
category: [String],
notNato: Boolean,
differsFromNato: Boolean,
});
module.exports = mongoose.model('Term', term);
| c457d82b547972d3b5695f612f9e23750070edd5 | [
"JavaScript"
] | 1 | JavaScript | DaveDailey/brevity-api | 6cb4deff74d51d6e1c3072c62403c79be0821ea0 | 7cf99671608f1ae878787108697e0843dd4d8de9 |
refs/heads/master | <file_sep>mama are mere
si tata are mere
| a7dbd757ca4e5918ccc1985a9bc42342f89c4df6 | [
"Text"
] | 1 | Text | vlddamian2/hellogit | c915f5241f42f0073e24ffef652359886f2b473f | 5b9b1a6f8cdbed0d0e2e343b44abb6c8d2d48abe |
refs/heads/master | <repo_name>LukasLewandowski/Web_Design_tutorial_Soundwave<file_sep>/styles.css
@import url("https://fonts.googleapis.com/css2?family=Poppins:400,500,700");
*,
*::before,
*::after {
box-sizing: border-box;
}
body {
padding: 0;
margin: 0;
background-color: #2f303a;
font-family: Poppins, sans-serif;
color: white;
/* 100vh (view hight) = 100% of the height of the screen */
min-height: 100vh;
}
/* https://www.youtube.com/watch?v=RZ-Oe4_Ew7g&t=7s 11:00 */
.full-height-grow {
display: flex;
flex-direction: column;
}
.brand-logo {
display: flex;
align-items: center;
font-size: 1.25em;
color: inherit;
text-decoration: none;
}
.brand-logo-name {
margin-left: 1rem;
}
.main-nav ul, .footer-nav ul {
display: flex;
margin: 0;
padding: 0;
list-style: none;
}
.main-nav a, .footer-nav a {
color: inherit;
text-decoration: none;
padding: 1rem;
}
.main-nav a:hover, .footer-nav a:hover {
color: #adadad;
}
.main-header, .main-footer{
display: flex;
/* gives as much space as it can between elements */
justify-content: space-between;
align-items: center;
}
.main-header {
height: 140px;
}
.main-footer {
height: 70px;
background-color: #202027;
font-weight: bold;
}
.social-link img {
/* image will have exact size of the text - this will align icons with text */
width: 1em;
margin-right: .25rem;
}
.main-footer .container {
display: flex;
justify-content: space-between;
}
.container {
max-width: 1200px;
/* margin 0 for top and bottom and auto for left and right which will center content if above 1200pz*/
margin: 0 auto;
padding: 0 40px;
/* container will grow to full height */
flex-grow: 1;
}
.title {
font-weight: 500;
font-size: 3em;
margin-bottom: 1rem;
margin-top: 0;
}
.subtitle {
font-weight: 500;
font-size: 1.2em;
margin-bottom: 2rem;
}
.btn {
color: white;
background-color: #1762A7;
padding: .75rem 1.5rem;
border-radius: .5rem;
text-decoration: none;
font-size: .9rem;
} | 222eb7840916d6d349620feeb8edfd1d24c8b930 | [
"CSS"
] | 1 | CSS | LukasLewandowski/Web_Design_tutorial_Soundwave | 3105c4e38ffb869969ca5530f515af7c3242952a | 4f62177f9729b6f042cad016cd929b098bfcc074 |
refs/heads/master | <file_sep># MyCaptchaTest
this is my test
<file_sep><?php
namespace xiaokeke\chaptcha;
class User{
public static function login($username,$password){
if($username=="xiaokeke"&&$password=="<PASSWORD>"){
return "login success";
}else{
return "login failed";
}
}
}
?><file_sep><?php
require_once "vendor/autoload.php";
$username="xiaokeke";
$password="<PASSWORD>";
$res=xiaokeke\chaptcha\User::login($username,$password);
echo $res;
?> | d76b1a4251b99e99e9b555c6740acd370750114c | [
"Markdown",
"PHP"
] | 3 | Markdown | dongdengke/MyCaptchaTest | 147cea0113eb46c5f98ecd69baacf2d01c8aa73b | c69f6c245c6f6531e1d46f9cd131bc48ecbd1eb5 |
refs/heads/master | <file_sep># ERPsolution
Enterprice Resource Planning solution
| 2b2830c2786b9f4db0c6e466109d3e4041ea5516 | [
"Markdown"
] | 1 | Markdown | TEKsolution/ERPsolution | 035062134a46dbfe02934cf9db6b5c8ca845e61e | 5266f201749d3e021a04cccc481f8dad77814863 |
refs/heads/main | <repo_name>qaz852tgb963/Three_TimByDongYi<file_sep>/Three/Assets/Scripts/NumPerMsnager.cs
using UnityEngine;
public class NumPerMsnager : MonoBehaviour
{
#region 屬性
[Header("生成的數字紙")]
public RectTransform NumPer_Instant;
[Header("生成的數字紙位置")]
public Vector2[] V2NumPer_L = {
new Vector2(-224,24),
new Vector2(-74,24),
new Vector2(76,24),
new Vector2(226,24),
new Vector2(-224,-126),
new Vector2(-74,-126),
new Vector2(76,-126),
new Vector2(226,-126),
new Vector2(-224,-276),
new Vector2(-74,-276),
new Vector2(76,-276),
new Vector2(226,-276),
new Vector2(-224,-426),
new Vector2(-74,-426),
new Vector2(76,-426),
new Vector2(226,-426),
};
#endregion
}
<file_sep>/Three/Assets/Scripts/NumPer.cs
using UnityEngine;
public class NumPer : MonoBehaviour
{
#region 屬性
[Header("是否碰到牆壁_上下左右")]
public bool bHitWallUp;
public bool bHitWallDown;
public bool bHitWallLeft;
public bool bHitWallRight;
[Header("是否碰到其他數字格子_上下左右")]
public bool bHitNumPerUp;
public bool bHitNumPerDown;
public bool bHitNumPerLeft;
public bool bHitNumPerRight;
[Header("射線長度")]
public float fHitLine = 90f;
//自己
private RectTransform rect;
#endregion
#region 方法
//private void SettingLen()
//{
// Gizmos.color = Color.black;
// Gizmos.DrawRay(transform.position, Vector3.up * fHitLine);
// Gizmos.color = Color.blue;
// Gizmos.DrawRay(transform.position, Vector3.down * fHitLine);
// Gizmos.color = Color.yellow;
// Gizmos.DrawRay(transform.position, Vector3.left * fHitLine);
// Gizmos.color = Color.red;
// Gizmos.DrawRay(transform.position, Vector3.right * fHitLine);
//}
public void CheckWall()
{
RaycastHit2D HitWallUp = Physics2D.Raycast(transform.position, Vector2.up, fHitLine, 1 << 11);
RaycastHit2D HitWallDown= Physics2D.Raycast(transform.position, Vector2.down, fHitLine, 1 << 11);
RaycastHit2D HitWallLeft= Physics2D.Raycast(transform.position, Vector2.left, fHitLine, 1 << 11);
RaycastHit2D HitWallRight= Physics2D.Raycast(transform.position, Vector2.right, fHitLine, 1 << 11);
if (HitWallUp && HitWallUp.transform.name == "牆壁_上")
{
bHitWallUp = true;
}
else
{
bHitWallUp = false;
}
if (HitWallDown && HitWallDown.transform.name == "牆壁_下")
{
bHitWallDown = true;
}
else
{
bHitWallDown = false;
}
if (HitWallLeft && HitWallLeft.transform.name == "牆壁_左")
{
bHitWallLeft = true;
}
else
{
bHitWallLeft = false;
}
if (HitWallRight && HitWallRight.transform.name == "牆壁_右")
{
bHitWallRight = true;
}
else
{
bHitWallRight = false;
}
}
public void CheckNumPer()
{
RaycastHit2D HitNumPerUp = Physics2D.Raycast(transform.position, Vector2.up, fHitLine, 1 << 12);
RaycastHit2D HitNumPerDown= Physics2D.Raycast(transform.position, Vector2.down, fHitLine, 1 << 12);
RaycastHit2D HitNumPerLeft= Physics2D.Raycast(transform.position, Vector2.left, fHitLine, 1 << 12);
RaycastHit2D HitNumPerRight= Physics2D.Raycast(transform.position, Vector2.right, fHitLine, 1 << 12);
if (HitNumPerUp && HitNumPerUp.transform.tag == "NumPer_Tag")
{
bHitNumPerUp = true;
}
else
{
bHitNumPerUp = false;
}
if (HitNumPerDown && HitNumPerDown.transform.tag == "NumPer_Tag")
{
bHitNumPerDown = true;
}
else
{
bHitNumPerDown = false;
}
if (HitNumPerLeft && HitNumPerLeft.transform.tag == "NumPer_Tag")
{
bHitNumPerLeft = true;
}
else
{
bHitNumPerLeft = false;
}
if (HitNumPerRight && HitNumPerRight.transform.tag == "NumPer_Tag")
{
bHitNumPerRight = true;
}
else
{
bHitNumPerRight = false;
}
}
#endregion
#region 事件
private void Start()
{
rect = GetComponent<RectTransform>();
}
private void Update()
{
CheckWall();
CheckNumPer();
}
//private void OnDrawGizmos()
//{
// settingLen();
//}
#endregion
}
<file_sep>/README.md
# Three_TimByDongYi
Threes! Freeplay 私服APP
| ddc8df6b2cb078da9f4dbf3dd87ad4bddfe8a93f | [
"C#",
"Markdown"
] | 3 | C# | qaz852tgb963/Three_TimByDongYi | a34836a87b68c4221f883222687b4dec8a1a88b5 | cf4ca9661a8dbcfeb310ab70958620835f09e872 |
refs/heads/master | <file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 6/7/2017
* License: Apache License 2.0
*/
require_once("../pointsManager.php");
/**
* Main get users process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
* @internal param $rPData
*/
function getUsersProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidTokenWithID($requestData->adminID, $requestData->adminToken, $mySQLConnection)) {
if (isAdminWithID($requestData->adminID, $mySQLConnection)) {
$mySQLQuery = "SELECT * FROM users";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgAdminGetUsersSuccess;
            $index = 0;
            $objectJSON->User = array();
            /** @noinspection PhpUndefinedMethodInspection */
            while ($rows = $Result->fetch_assoc()) {
                // Build each entry explicitly instead of relying on PHP's
                // implicit "default object from empty value" behaviour.
                $user = new stdClass();
                $user->id = $rows['id'];
                $user->firstName = $rows['firstName'];
                $user->lastName = $rows['lastName'];
                $user->username = $rows['username'];
                $user->eMail = $rows['eMail'];
                $user->phone = $rows['phone'];
                $user->currentPoints = $rows['currentPoints'];
                $user->gainedPoints = $rows['gainedPoints'];
                $user->spendedPoints = $rows['spendedPoints'];
                $user->loginAttempts = $rows['loginAttempts'];
                $user->status = $rows['status'];
                $user->role = $rows['role'];
                $user->creationDate = $rows['creationDate'];
                $user->unlockDate = $rows['unlockDate'];
                $objectJSON->User[$index] = $user;
                $index++;
            }
} else {
$objectJSON->Status = FORBIDDEN;
$objectJSON->Message = MsgForbidden;
}
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
return $objectJSON;
}<file_sep><?php
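
// ---------------------------------------------------------------------------
// Illustrative sketch only: how an endpoint script might wire up
// getUsersProcess(). The real entry point that decodes the request and emits
// the response is not part of this file, so the flow and function name below
// are assumptions kept for documentation purposes, not the project's actual
// index.php code.
// ---------------------------------------------------------------------------
function exampleGetUsersEndpoint($mySQLConnection)
{
    // Decode the JSON body of the POST request into the expected object.
    $requestData = json_decode(file_get_contents("php://input"));
    // Run the process and reply with its JSON-encoded result.
    $objectJSON = getUsersProcess($requestData, $mySQLConnection, new stdClass());
    header("Content-Type: application/json");
    echo json_encode($objectJSON);
}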
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/7/2017
* License: Apache License 2.0
*/
/**
* Check if user's id is used
* @param $id
* @param $mySQLConnection
* @return bool
*/
function isIDUsed($id, $mySQLConnection)
{
$mySQLQuery = "SELECT id FROM users WHERE id = '$id'";
/** @noinspection PhpUndefinedMethodInspection */
if (mysqli_num_rows($mySQLConnection->query($mySQLQuery)) >= 1) {
return true;
}
return false;
}
/**
* Check if user's userName is used
* @param $username
* @param $mySQLConnection
* @return bool
*/
function isUsernameUsed($username, $mySQLConnection)
{
$mySQLQuery = "SELECT username FROM users WHERE username = '$username'";
/** @noinspection PhpUndefinedMethodInspection */
if (mysqli_num_rows($mySQLConnection->query($mySQLQuery)) >= 1) {
return true;
}
return false;
}
/**
* Check if user's eMail is used
* @param $eMail
* @param $mySQLConnection
* @return bool
*/
function isMailUsed($eMail, $mySQLConnection)
{
$mySQLQuery = "SELECT eMail FROM users WHERE eMail = '$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
if (mysqli_num_rows($mySQLConnection->query($mySQLQuery)) >= 1) {
return true;
}
return false;
}
/**
* Check if user's id is valid
* @param $id
* @param $mySQLConnection
* @return bool
*/
function isIDValid($id, $mySQLConnection)
{
return isIDUsed($id, $mySQLConnection);
}
/**
* Check if user's username meets requirements
* @param $username
* @return bool
*/
function isValidUsername($username)
{
$namesSize = 0;
$blockedNames[$namesSize] = "root";
$namesSize++;
$blockedNames[$namesSize] = "admin";
$namesSize++;
for ($i = 0; $i < $namesSize; $i++) {
if ($username == $blockedNames[$i]) {
return false;
}
}
return true;
}
/**
* Check if user's eMail meets requirements
* @param $eMail
* @return bool
*/
function isValidMail($eMail)
{
if (filter_var("$eMail", FILTER_VALIDATE_EMAIL)) {
return true;
}
return false;
}
/**
* Check if user's phone meets requirements
 * @param $phone
* @return bool
*/
function isValidPhone($phone)
{
if (strlen($phone) == 10 && is_numeric($phone)) {
return true;
}
return false;
}
/**
* Check if user's password meets requirements
* @param $password
* @return bool
*/
function isValidPassword($password)
{
if (strlen($password) > 8) {
return true;
}
return false;
}
/**
* Check if provided token is valid based on the user's id
* @param $id
* @param $token
* @param $mySQLConnection
* @return bool
*/
function isValidTokenWithID($id, $token, $mySQLConnection)
{
if (hash(hashMethod, $token) == getHashedTokenByID($id, $mySQLConnection)) {
return true;
}
return false;
}
/**
* Check if provided token is valid based on the user's eMail
* @param $eMail
* @param $token
* @param $mySQLConnection
* @return bool
*/
function isValidTokenWithMail($eMail, $token, $mySQLConnection)
{
return isValidTokenWithID(getIDFromMail($eMail, $mySQLConnection), $token, $mySQLConnection);
}
/**
* Check if provided token is valid based on the user's userName
* @param $username
* @param $token
* @param $mySQLConnection
* @return bool
*/
function isValidTokenWithUsername($username, $token, $mySQLConnection)
{
return isValidTokenWithID(getIDFromUsername($username, $mySQLConnection), $token, $mySQLConnection);
}
/**
* Check if provided user is enabled on the user's id
* @param $id
* @param $mySQLConnection
* @return bool
*/
function isUserEnabledWithID($id, $mySQLConnection)
{
$mySQLQuery = "SELECT status FROM users WHERE id = '$id'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
if ($mySQLResult['status'] == 'enabled') {
return true;
}
return false;
}
/**
 * Check if provided user is enabled based on the user's eMail
* @param $eMail
* @param $mySQLConnection
* @return bool
*/
function isUserEnabledWithMail($eMail, $mySQLConnection)
{
return isUserEnabledWithID(getIDFromMail($eMail, $mySQLConnection), $mySQLConnection);
}
/**
* Check if provided user is enabled on the user's id
* @param $username
* @param $mySQLConnection
* @return bool
*/
function isUserEnabledWithUsername($username, $mySQLConnection)
{
return isUserEnabledWithID(getIDFromUsername($username, $mySQLConnection), $mySQLConnection);
}
/**
* Check if provided user is admin based on the user's id
* @param $id
* @param $mySQLConnection
* @return bool
*/
function isAdminWithID($id, $mySQLConnection)
{
$mySQLQuery = "SELECT role FROM users WHERE id = '$id'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
if ($mySQLResult['role'] == "admin") {
return true;
}
return false;
}
/**
* Check if provided user is admin based on the user's eMail
* @param $eMail
* @param $mySQLConnection
* @return bool
*/
function isAdminWithMail($eMail, $mySQLConnection)
{
return isAdminWithID(getIDFromMail($eMail, $mySQLConnection), $mySQLConnection);
}
/**
* Check if provided user is admin based on the user's userName
* @param $username
* @param $mySQLConnection
* @return bool
*/
function isAdminWithUsername($username, $mySQLConnection)
{
return isAdminWithID(getIDFromUsername($username, $mySQLConnection), $mySQLConnection);
}
/**
* Search and return user's id based on user's userName
* @param $username
* @param $mySQLConnection
* @return string
*/
function getIDFromUsername($username, $mySQLConnection)
{
$mySQLQuery = "SELECT id FROM users WHERE userName='$username'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
return $mySQLResult['id'];
}
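
// Illustrative sketch only: the lookup helpers in this file interpolate their
// arguments straight into the SQL string, which is unsafe whenever a value
// can contain quotes. A prepared-statement variant of getIDFromUsername() is
// sketched below as an assumption of how that could look; note that
// get_result() requires the mysqlnd driver, and nothing else in this codebase
// calls this function.
function getIDFromUsernamePrepared($username, $mySQLConnection)
{
    /** @noinspection PhpUndefinedMethodInspection */
    $statement = $mySQLConnection->prepare("SELECT id FROM users WHERE username = ?");
    $statement->bind_param("s", $username);
    $statement->execute();
    $row = $statement->get_result()->fetch_assoc();
    $statement->close();
    return $row ? $row['id'] : null;
}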
/**
* Search and return user's id based on user's eMail
* @param $eMail
* @param $mySQLConnection
* @return mixed
*/
function getIDFromMail($eMail, $mySQLConnection)
{
$mySQLQuery = "SELECT id FROM users WHERE eMail='$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
return $mySQLResult['id'];
}
/**
* Search and return user's username based on user's id
* @param $id
* @param $mySQLConnection
* @return mixed
*/
function getUsernameFromID($id, $mySQLConnection)
{
$mySQLQuery = "SELECT username FROM users WHERE id='$id'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
    return $mySQLResult['username'];
}
/**
* Search and return user's username based on user's eMail
* @param $eMail
* @param $mySQLConnection
* @return mixed
*/
function getUsernameFromMail($eMail, $mySQLConnection)
{
$mySQLQuery = "SELECT username FROM users WHERE eMail='$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
    return $mySQLResult['username'];
}
/**
* Search and return user's eMail based on user's id
* @param $id
* @param $mySQLConnection
* @return mixed
*/
function getMailFromID($id, $mySQLConnection)
{
$mySQLQuery = "SELECT eMail FROM users WHERE id='$id'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
return $mySQLResult['eMail'];
}
/**
* Search and return user's eMail based on user's username
* @param $username
* @param $mySQLConnection
* @return mixed
*/
function getMailFromUsername($username, $mySQLConnection)
{
$mySQLQuery = "SELECT eMail FROM users WHERE username='$username'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
return $mySQLResult['eMail'];
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminAddPointsSuccess", "You have added those points into this user !");
define("MsgAdminAddPointsWrongID", "There is no user with this ID !");
define("MsgAdminAddPointsEmptyFields", "To add points, you must fill in all fields !");
// Developer Messages
define("MsgAdminAddPointsEmptyPOST", "You must give adminID, adminToken, userID and pointsToAdd using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminRemovePointsSuccess", "You have removed those points from this user !");
define("MsgAdminRemovePointsNotEnoughPoints", "This user have less points, from points you want remove !");
define("MsgAdminRemovePointsWrongID", "There is no user with this ID !");
define("MsgAdminRemovePointsEmptyFields", "To remove points, you must fill in all fields !");
// Developer Messages
define("MsgAdminRemovePointsEmptyPOST", "You must give adminID, adminToken, userID and pointsToRemove using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 3/7/2017
* License: Apache License 2.0
*/
// Generic
define("MsgDBCantConnect", "We can't connect with database !");
define("MsgMethodNotAllowed", "The system accepts only POST requests!");
define("MsgUnauthorized", "You are unauthorized !");
define("MsgForbidden", "You are not a admin !");
define("MsgInvalidUsername", "This is not a valid username !");
define("MsgInvalidPhone", "This is not a valid phone !");
define("MsgInvalidMail", "This is not a valid e-mail !");
define("MsgInvalidPassword", "<PASSWORD> !");
// Services
require_once("auth/index.php");
require_once("user/index.php");
require_once("admin/index.php");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminRemoveUserSuccess", "You have removed this user !");
define("MsgAdminRemoveUserWrongID", "There is no user with this ID !");
define("MsgAdminRemoveUserEmptyField", "To remove user, you must fill in the field !");
// Developer Messages
define("MsgAdminRemoveUserEmptyPOST", "You must give adminID, adminToken and userID using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 4/7/2017
* License: Apache License 2.0
*/
/**
* Create and return a random password
* @param int $length
* @return string
*/
function createPassword($length = 10)
{
    if (function_exists('random_bytes')) {
        // PHP 7+: cryptographically secure bytes, trimmed to the requested length.
        return substr(base64_encode(random_bytes($length)), 0, $length);
    }
    $characters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
    $randomString = '';
    for ($i = 0; $i < $length; $i++) {
        // rand() bounds are inclusive, so the last valid index is strlen() - 1.
        $randomString .= $characters[rand(0, strlen($characters) - 1)];
    }
    return $randomString;
}
/**
* Create and return a random passwordSalt
* @return string
*/
function createPasswordSalt()
{
return dechex(mt_rand(0, 2147483647)) . dechex(mt_rand(0, 2147483647));
}
/**
* Encrypt and return the encrypted password
* @param $password
* @param $passwordSalt
* @return string
*/
function encryptPassword($password, $passwordSalt)
{
$encryptedPassword = $password;
for ($i = 0; $i <= hashTimes; $i++) {
$encryptedPassword = hash(hashMethod, $encryptedPassword . $passwordSalt);
}
return $encryptedPassword;
}
/**
* Create and return a passwordCrypt pair
* @param $password
* @return mixed, encryptedPassword and passwordSalt
*/
function createPasswordCrypt($password)
{
$passwordSalt = createPasswordSalt();
$passwordCrypt = new stdClass();
$passwordCrypt->encryptedPassword = encryptPassword($password, $passwordSalt);
$passwordCrypt->passwordSalt = $passwordSalt;
return $passwordCrypt;
}
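/*
 * Illustrative sketch (not part of the original library): how the hashing and
 * verification halves of this file fit together. examplePasswordFlow() is a
 * hypothetical helper added for documentation only; it is defined but never
 * called, so including this file does not execute it.
 */
function examplePasswordFlow()
{
    $passwordCrypt = createPasswordCrypt("myS3cretPass");     // hash + salt pair for storage
    $accepted = isEqualPass("myS3cretPass", $passwordCrypt);  // true: same password, same salt
    $rejected = isEqualPass("wrongPass", $passwordCrypt);     // false: hashes differ
    return $accepted && !$rejected;
}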
/**
* Check if a password is equal to passwordCrypt
* @param $password
* @param $passwordCrypt
* @return bool
*/
function isEqualPass($password, $passwordCrypt)
{
if ($passwordCrypt->encryptedPassword == encryptPassword($password, $passwordCrypt->passwordSalt)) {
return true;
}
return false;
}
/**
* Search and return the user's passwordCrypt pair based on user's id
* @param $id
* @param $mySQLConnection
* @return mixed, encryptedPassWord and passwordSalt
*/
function getPasswordCryptByID($id, $mySQLConnection)
{
$mySQLQuery = "SELECT password, passwordSalt FROM users WHERE id='$id'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
$passwordCrypt = new stdClass();
$passwordCrypt->encryptedPassword = $mySQLResult['password'];
$passwordCrypt->passwordSalt = $mySQLResult['passwordSalt'];
return $passwordCrypt;
}
/**
* Search and return the user's passCrypt pair based on user's userName
* @param $userName
* @param $mySQLConnection
* @return mixed, encryptedPassWord and passSalt
*/
function getPassCryptByUserName($userName, $mySQLConnection)
{
return getPasswordCryptByID(getIDFromUsername($userName, $mySQLConnection), $mySQLConnection);
}
/**
* Search and return the user's passCrypt pair based on user's eMail
* @param $eMail
* @param $mySQLConnection
* @return mixed, encryptedPassWord and passSalt
*/
function getPassCryptByMail($eMail, $mySQLConnection)
{
return getPasswordCryptByID(getIDFromMail($eMail, $mySQLConnection), $mySQLConnection);
}
/**
* Create tokenCrypt for user authentication based on password and eMail
* @param $eMail
* @param $password
* @return mixed, token and hashedToken
*/
function createTokenCrypt($eMail, $password)
{
$tokenCrypt = new stdClass();
$tokenCrypt->token = hash(hashMethod, $eMail . $password);
$tokenCrypt->hashedToken = hash(hashMethod, $tokenCrypt->token);
return $tokenCrypt;
}
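/*
 * Illustrative sketch (not part of the original library): the plain token is
 * returned to the client, while only its hash is stored in the users table.
 * exampleTokenFlow() is a hypothetical helper, defined but never called.
 */
function exampleTokenFlow($eMail, $password)
{
    $tokenCrypt = createTokenCrypt($eMail, $password);
    $forClient = $tokenCrypt->token;         // sent back in the JSON response
    $forDatabase = $tokenCrypt->hashedToken; // saved in users.token
    return array($forClient, $forDatabase);
}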
/**
* Search and return user's hashed token based on user's id
* @param $id
* @param $mySQLConnection
* @return mixed
*/
function getHashedTokenByID($id, $mySQLConnection)
{
$mySQLQuery = "SELECT token FROM users WHERE id='$id'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLResult = $mySQLConnection->query($mySQLQuery);
$mySQLResult = mysqli_fetch_array($mySQLResult);
return $mySQLResult['token'];
}
/**
* Search and return user's hashed token based on user's username
* @param $username
* @param $mySQLConnection
* @return mixed
*/
function getHashedTokenByUsername($username, $mySQLConnection)
{
return getHashedTokenByID(getIDFromUsername($username, $mySQLConnection), $mySQLConnection);
}
/**
* Search and return user's hashed token based on user's eMail
* @param $eMail
* @param $mySQLConnection
* @return mixed
*/
function getHashedTokenByMail($eMail, $mySQLConnection)
{
return getHashedTokenByID(getIDFromMail($eMail, $mySQLConnection), $mySQLConnection);
}
/**
* Check if provided plain passwords is equal
* @param $passWord
* @param $passWordValidate
* @return bool
*/
function isPassWordEqual($passWord, $passWordValidate)
{
if ($passWord == $passWordValidate) {
return true;
}
return false;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once("core.php");
if (isPostMethod($_SERVER['REQUEST_METHOD'])) {
    if (isset($_POST['userID'], $_POST['userToken'], $_POST['oldPassword'], $_POST['newPassword'], $_POST['newPasswordValidate'])) {
if (isPostValuesEmpty($_POST)) {
$objectJSON->Status = EMPTY_FIELDS;
$objectJSON->Message = MsgAuthChangePasswordEmptyFields;
} else {
$requestData = new stdClass();
$requestData->userID = $_POST['userID'];
$requestData->userToken = $_POST['userToken'];
            $requestData->oldPassword = $_POST['oldPassword'];
            $requestData->newPassword = $_POST['newPassword'];
            $requestData->newPasswordValidate = $_POST['newPasswordValidate'];
$objectJSON = changePasswordProcess($requestData, $mySQLConnection, $objectJSON);
}
} else {
$objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAuthChangePasswordEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 6/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once ("core.php");
if(isPostMethod($_SERVER['REQUEST_METHOD'])) {
if (isset($_POST['adminID'], $_POST['adminToken'])) {
$requestData = new stdClass();
$requestData->adminID = $_POST['adminID'];
$requestData->adminToken = $_POST['adminToken'];
$objectJSON = getUsersProcess($requestData, $mySQLConnection, $objectJSON);
} else {
$objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAdminGetUsersEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 6/7/2017
* License: Apache License 2.0
*/
require_once("../pointsManager.php");
/**
* Main add points process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function addPointsProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isIDUsed($requestData->userID, $mySQLConnection)) {
// Calculate user's new points
$newPoints = getPointsFromID($requestData->userID, $mySQLConnection) + $requestData->pointsToAdd;
// Add history record for this add process
addHistoryRecord($requestData->userID, "add", $newPoints, $mySQLConnection);
// Update user's points
$mySQLQuery = "UPDATE users SET currentPoints='$newPoints', gainedPoints=gainedPoints+'$requestData->pointsToAdd' WHERE id='$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgAdminAddPointsSuccess;
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAdminAddPointsWrongID;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
require_once("addUser.php");
require_once("removeUser.php");
require_once("banUser.php");
require_once("unBanUser.php");
require_once("getUsers.php");
require_once("getUser.php");
require_once("addPoints.php");
require_once("removePoints.php");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 3/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once("core.php");
if (isPostMethod($_SERVER['REQUEST_METHOD'])) {
if (isset($_POST['eMail'], $_POST['username'])) {
if (isPostValuesEmpty($_POST)) {
$objectJSON->Status = EMPTY_FIELDS;
$objectJSON->Message = MsgAuthRecoveryEmptyField;
} else {
$requestData = new stdClass();
$requestData->username = $_POST['username'];
$requestData->eMail = $_POST['eMail'];
$objectJSON = recoveryProcess($requestData, $mySQLConnection, $objectJSON);
}
} else {
$objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAuthRecoveryEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// User Messages
define("MsgAuthRecoverySuccess", "Check your e-mail for the new password !");
define("MsgAuthRecoveryWrongMail", "There is no user with this e-mail ! Try again !");
define("MsgAuthRecoveryWrongUsername", "There is no user with this username ! Try again !");
define("MsgAuthRecoveryEmptyField", "To recover your account, you must fill in all fields !");
// Developer Messages
define("MsgAuthRecoveryEmptyPOST", "You must give userName and eMail using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminGetUserSuccess", "We have found the user !");
define("MsgAdminGetUserWrongID", "There is no user with this ID !");
define("MsgAdminGetUserEmptyField", "To get user, you must fill in the field !");
// Developer Messages
define("MsgAdminGetUserEmptyPOST", "You must give adminID and adminToken using POST method !");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Add a record for this login process on loginlog table
* @param $eMail
* @param $loginResult
* @param $mySQLConnection
*/
function addLoginRecord($eMail, $loginResult, $mySQLConnection)
{
$userID = getIDFromMail($eMail, $mySQLConnection);
$userName = getUsernameFromID($userID, $mySQLConnection);
$userIP = $_SERVER['REMOTE_ADDR'];
if($loginResult == SUCCESS) {
$attemptResult = "success";
} else {
$attemptResult = "failure";
}
$mySQLQuery = "INSERT INTO loginlog (ip, userID, userName, eMail, attemptResult) VALUES ('$userIP', '$userID', '$userName', '$eMail', '$attemptResult')";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
}
/**
* Main login process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function loginProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidMail($requestData->eMail)) {
if (isValidPassword($requestData->password)) {
if (isMailUsed($requestData->eMail, $mySQLConnection)) {
if (userCanInteract($requestData->eMail, $mySQLConnection)) {
$userID = getIDFromMail($requestData->eMail, $mySQLConnection);
$passwordCrypt = getPasswordCryptByID($userID, $mySQLConnection);
if (isEqualPass($requestData->password, $passwordCrypt)) {
resetUnsuccessfulAttempts($requestData->eMail, $mySQLConnection);
$tokenCrypt = createTokenCrypt($requestData->eMail, $requestData->password);
$tokenCrypt->hashedToken = md5("null");
$objectJSON->Status = SUCCESS;
$objectJSON->Token = $tokenCrypt->token;
$objectJSON->Message = MsgAuthLoginSuccess;
} else {
addUnsuccessfulAttempt($requestData->eMail, $mySQLConnection);
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgAuthLoginWrongPass;
}
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgAuthLoginBruteForce;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthLoginWrongMail;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidPassword;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidMail;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Main change password process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function changePasswordProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidTokenWithID($requestData->userID, $requestData->userToken, $mySQLConnection)) {
if (isValidPassword($requestData->oldPassword)) {
if (isValidPassword($requestData->newPassword)) {
if (isValidPassword($requestData->newPasswordValidate)) {
if ($requestData->newPassword == $requestData->newPasswordValidate) {
if ($requestData->newPassword == $requestData->oldPassword) {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthChangePasswordSame;
} else {
$oldPasswordCrypt = getPasswordCryptByID($requestData->userID, $mySQLConnection);
if (isEqualPass($requestData->oldPassword, $oldPasswordCrypt)) {
$userMail = getMailFromID($requestData->userID, $mySQLConnection);
$newPasswordCrypt = createPasswordCrypt($requestData->newPassword);
$newTokenCrypt = createTokenCrypt($userMail, $requestData->newPassword);
$mySQLQuery = "UPDATE users SET password = '$<PASSWORD>', passwordSalt = '$new<PASSWORD>', token='$newTokenCrypt->hashedToken' WHERE id='$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
$objectJSON->Status = SUCCESS;
$objectJSON->Token = $newTokenCrypt->token;
$objectJSON->Message = MsgAuthChangePasswordSuccess;
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthChangePasswordWrongOldPassword;
}
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthChangeNewPasswordNotEqual;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthChangePasswordInvalidPasswordValidate;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthChangePasswordInvalidNewPassword;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthChangePasswordInvalidOldPassword;
}
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Check if requested method is POST
* @param $requestedMethod
* @return bool
*/
function isPostMethod($requestedMethod) {
if ($requestedMethod === 'POST') {
return true;
}
return false;
}
/**
* Check if POST map have at least 1 empty value
* @param $postMap
* @return bool
*/
function isPostValuesEmpty($postMap)
{
foreach ($postMap as $key => $value) {
if($value == "") {
return true;
}
}
return false;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// User Messages
define("MsgViewHistorySuccess", "We have found your history !");
// Developer Messages
define("MsgViewHistoryEmptyPOST", "You must give userID and userToken using POST method !");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 6/7/2017
* License: Apache License 2.0
*/
require_once("../pointsManager.php");
/**
* Main remove points process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function removePointsProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isIDUsed($requestData->userID, $mySQLConnection)) {
if (isValidTokenWithID($requestData->adminID, $requestData->adminToken, $mySQLConnection)) {
if (isAdminWithID($requestData->adminID, $mySQLConnection)) {
if ($requestData->pointsToRemove <= getPointsFromID($requestData->userID, $mySQLConnection)) {
// Calculate user's new points
$newPoints = getPointsFromID($requestData->userID, $mySQLConnection) - $requestData->pointsToRemove;
// Add history record for this remove process
addHistoryRecord($requestData->userID, "remove", $newPoints, $mySQLConnection);
// Update user's points
$mySQLQuery = "UPDATE users SET currentPoints='$newPoints', spendedPoints=spendedPoints+'$requestData->pointsToRemove' WHERE id='$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgAdminRemovePointsSuccess;
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAdminRemovePointsNotEnoughPoints;
$objectJSON->CurrentPoints = getPointsFromID($requestData->userID, $mySQLConnection);
}
} else {
$objectJSON->Status = FORBIDDEN;
$objectJSON->Message = MsgForbidden;
}
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAdminRemovePointsWrongID;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 4/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once ("core.php");
require_once ("../../../v1/auth/register/core.php");
if(isPostMethod($_SERVER['REQUEST_METHOD'])) {
if (isset($_POST['adminID'], $_POST['adminToken'], $_POST['firstName'], $_POST['lastName'], $_POST['username'], $_POST['eMail'], $_POST['password'], $_POST['passwordValidate'])) {
if (isPostValuesEmpty($_POST)) {
$objectJSON->Status = EMPTY_FIELDS;
$objectJSON->Message = MsgAdminAddUserEmptyFields;
} else {
$requestData = new stdClass();
$requestData->adminID = $_POST['adminID'];
$requestData->adminToken = $_POST['adminToken'];
$requestData->firstName = $_POST['firstName'];
$requestData->lastName = $_POST['lastName'];
$requestData->username = $_POST['username'];
$requestData->eMail = $_POST['eMail'];
            $requestData->password = $_POST['password'];
$requestData->passwordValidate = $_POST['passwordValidate'];
$objectJSON = addUserProcess($requestData, $mySQLConnection, $objectJSON);
}
} else {
$objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAdminAddUserEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminUnBanUserSuccess", "You have unbanned this user !");
define("MsgAdminUnBanUserNotBanned", "This user is not banned !");
define("MsgAdminUnBanUserWrongID", "There is no user with this ID !");
define("MsgAdminUnBanUserEmptyField", "To unban user, you must fill in the field !");
// Developer Messages
define("MsgAdminUnBanUserEmptyPOST", "You must give adminID, adminToken and userID using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// User Messages
define("MsgAuthLoginSuccess", "You have logged-in successfully !");
define("MsgAuthLoginWrongPass", "Your password is wrong ! Try again !");
define("MsgAuthLoginBruteForce", "Your account was locked for security reasons ! Please wait 1 hour or contact technical support !");
define("MsgAuthLoginWrongMail", "There is no user with this e-mail ! Try again !");
define("MsgAuthLoginEmptyFields", "To log in, you must fill in all fields !");
// Developer Messages
define("MsgAuthLoginEmptyPOST", "You must give eMail and password using POST method !");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// User Messages
define("MsgAuthRegisterSuccess", "Your account was created and you logged-in automatically !");
define("MsgAuthRegisterMailUsed", "There is already a user with this e-mail !");
define("MsgAuthRegisterUsernameUsed", "There is already a user with this username !");
define("MsgAuthRegisterPasswordNotEqual", "The validation password is not equal with password !");
define("MsgAuthRegisterEmptyFields", "To register, you must fill in all fields !");
// Developer Messages
define("MsgAuthRegisterEmptyPOST", "You must give firstName, lastName, username, eMail, password and passwordValidate using POST method !");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 6/7/2017
* License: Apache License 2.0
*/
/**
 * Search and return the user's current points based on user's id
 * @param $userID
 * @param $mySQLConnection
 * @return mixed
 */
function getPointsFromID($userID, $mySQLConnection) {
$mySQLQuery = "SELECT currentPoints FROM users WHERE id='$userID'";
/** @noinspection PhpUndefinedMethodInspection */
$result = $mySQLConnection->query($mySQLQuery);
$result = mysqli_fetch_array($result);
return $result['currentPoints'];
}
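/*
 * Illustrative sketch (not part of the original manager): addHistoryRecord()
 * reads the user's current points itself, so it must be called BEFORE the
 * UPDATE that changes them (as the add/remove points services do).
 * exampleAwardPoints() is a hypothetical helper, defined but never called.
 */
function exampleAwardPoints($userID, $points, $mySQLConnection)
{
    $newPoints = getPointsFromID($userID, $mySQLConnection) + $points;
    addHistoryRecord($userID, "add", $newPoints, $mySQLConnection); // logs old -> new
    // ...then run the UPDATE users SET currentPoints='$newPoints' query
}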
/**
* Add a record into history table for a points change
* @param $userID
* @param $historyAction
* @param $newPoints
* @param $mySQLConnection
*/
function addHistoryRecord($userID, $historyAction, $newPoints, $mySQLConnection) {
$oldPoints = getPointsFromID($userID, $mySQLConnection);
$mySQLQuery = "INSERT INTO history (userID, action, oldPoints, newPoints) VALUES ('$userID', '$historyAction', '$oldPoints', '$newPoints')";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
}<file_sep><?php
/**
* Created by PhpStorm.
* User: iordkost
* Date: 29/7/2017
* Time: 11:42 πμ
*/
require_once("../config.php");
if (DebugMode) {
$Query = "TRUNCATE TABLE registerlog";
$Result = $mySQLConnection->query($Query);
$Query = "TRUNCATE TABLE loginlog";
$Result = $mySQLConnection->query($Query);
$Query = "TRUNCATE TABLE history";
$Result = $mySQLConnection->query($Query);
$Query = "TRUNCATE TABLE users";
$Result = $mySQLConnection->query($Query);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = "RESET SUCCESS";
} else {
$objectJSON->Status = "DISABLED";
$objectJSON->Message = "Debug mode is disabled !";
}
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
require_once("getUser.php");
require_once("getPoints.php");
require_once("getHistory.php");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 1/7/2017
* License: Apache License 2.0
*/
// Turn off all PHP error reporting
// Set 0 for production, set -1 for development
error_reporting(-1);
// Debug mode
// Set false for production, set true for development
define("DebugMode", true);
// PHP Requires
require_once("extensions/PHP2JSON/index.php");
require_once("extensions/postChecker/index.php");
require_once("extensions/PHPMailer-5.2.24/PHPMailerAutoload.php");
require_once("extensions/customPHPMailer/index.php");
require_once("extensions/antiBruteForce/index.php");
require_once("languages/setLanguage.php");
require_once("v1/auth/authManager.php");
require_once("v1/auth/passwordManager.php");
// System Info
define("sysName", "pointSystem");
define("sysVersion", "alpha");
define("sysOwner", "JNK Software");
define("sysDeveloper", "<NAME>");
define("sysMail", "<EMAIL>");
define("sysLicense", "Apache License 2.0");
// Security Settings
define("hashMethod", "sha256");
define("hashTimes", "65536");
// Enable JSON and init the JSON object
enableJSON();
$objectJSON = getStdJSON();
// Enable language function
if (isset($_POST['language'])) {
enableLanguage($_POST['language']);
} else {
enableLanguage();
}
// Set MySQL Connection Info
define("Host", "ouranos.jnksoftware.eu");
define("Port", "3306");
define("Username", "pointSystemUser");
define("Password", "<PASSWORD>");
define("Database", "pointSystemUser");
// Set SMTP Connection Info
define("SMTPHost", "mail.jnksoftware.eu");
define("SMTPPort", "25");
define("SMTPUsername", "<EMAIL>");
define("SMTPPassword", "");
// Try connect with MySQL Server, if can't print a JSON message and exit
try {
// Try connect with mySQL Server and Database
$mySQLConnection = mysqli_connect(Host, Username, Password, Database, Port);
    // Check if connection failed before using the handle
    if (!$mySQLConnection) {
        mySQLErrorHandler($objectJSON);
    }
    // Set Charset as utf8 for this connection
    mysqli_set_charset($mySQLConnection, "utf8");
} catch (Exception $ex) {
mySQLErrorHandler($objectJSON);
}
/**
* Close passed MySQL connection
* @param $mySQLConnection
*/
function closeMySQLConnection($mySQLConnection) {
mysqli_close($mySQLConnection);
}
/**
* Print error message for mySQL
* @param $objectJSON
*/
function mySQLErrorHandler($objectJSON)
{
$objectJSON->Status = DB_ERROR;
$objectJSON->Message = MsgDBCantConnect;
printJSON($objectJSON);
die();
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 3/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once ("core.php");
if(isPostMethod($_SERVER['REQUEST_METHOD'])) {
if (isset($_POST['eMail'], $_POST['password'])) {
if (isPostValuesEmpty($_POST)) {
$objectJSON->Status = EMPTY_FIELDS;
$objectJSON->Message = MsgAuthLoginEmptyFields;
} else {
$requestData = new stdClass();
$requestData->eMail = $_POST['eMail'];
$requestData->password = $_POST['password'];
$objectJSON = loginProcess($requestData, $mySQLConnection, $objectJSON);
addLoginRecord($requestData->eMail, $objectJSON->Status, $mySQLConnection);
}
} else {
$objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAuthLoginEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Main recovery process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function recoveryProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidUsername($requestData->username)) {
if (isValidMail($requestData->eMail)) {
if (isUsernameUsed($requestData->username, $mySQLConnection)) {
if (isMailUsed($requestData->eMail, $mySQLConnection)) {
                    $password = createPassword();
$passwordCrypt = createPasswordCrypt($password);
                    // Send eMail with the new password
                    customSentMail($requestData->eMail, $requestData->username, sysName . " | " . "New Password", "This is your new password : " . $password);
$newTokenCrypt = createTokenCrypt($requestData->eMail, $password);
$mySQLQuery = "UPDATE users SET password = <PASSWORD>', passwordSalt = '$<PASSWORD>', token='$newTokenCrypt->hashedToken' WHERE eMail='$requestData->eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
if (DebugMode) {
$objectJSON->Debug = new stdClass();
                        $objectJSON->Debug->newPassword = $password;
}
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgAuthRecoverySuccess;
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthRecoveryWrongMail;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthRecoveryWrongUsername;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidMail;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidUsername;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
 * Main unban user process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function unBanUserProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isIDUsed($requestData->userID, $mySQLConnection)) {
if (!isUserEnabledWithID($requestData->userID, $mySQLConnection)) {
$mySQLQuery = "UPDATE users SET status='enabled' WHERE id='$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgAdminUnBanUserSuccess;
} else {
$objectJSON->Status = NO_ACTION;
$objectJSON->Message = MsgAdminUnBanUserNotBanned;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAdminUnBanUserWrongID;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 3/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once("core.php");
if (isPostMethod($_SERVER['REQUEST_METHOD'])) {
if (isset($_POST['firstName'], $_POST['lastName'], $_POST['username'], $_POST['eMail'], $_POST['password'], $_POST['passwordValidate'])) {
if (isPostValuesEmpty($_POST)) {
$objectJSON->Status = EMPTY_FIELDS;
$objectJSON->Message = MsgAuthRegisterEmptyFields;
} else {
$requestData = new stdClass();
$requestData->firstName = $_POST['firstName'];
$requestData->lastName = $_POST['lastName'];
$requestData->username = $_POST['username'];
$requestData->eMail = $_POST['eMail'];
            $requestData->password = $_POST['password'];
$requestData->passwordValidate = $_POST['passwordValidate'];
$objectJSON = registerProcess($requestData, $mySQLConnection, $objectJSON);
}
} else {
        $objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAuthRegisterEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// User Messages
define("MsgAuthChangePasswordSuccess", "You have change your password successfully !");
define("MsgAuthChangePasswordSame", "The new password can not be the same as your old password !");
define("MsgAuthChangeNewPasswordNotEqual", "The validation password is not equal with new password !");
define("MsgAuthChangePasswordWrongOldPassword", "Your old password is wrong ! Try again !");
define("MsgAuthChangePasswordInvalidPasswordValidate", "The validate password is not valid !");
define("MsgAuthChangePasswordInvalidNewPassword", "The new password is not valid !");
define("MsgAuthChangePasswordInvalidOldPassword", "The old password is not valid !");
define("MsgAuthChangePasswordEmptyFields", "To change your password, you must fill in all fields !");
// Developer Messages
define("MsgAuthChangePasswordEmptyPOST", "You must give userID, userToken, oldPassword, newPassword and newPasswordValidate using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Main get points process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function getPointsProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidTokenWithID($requestData->userID, $requestData->userToken, $mySQLConnection)) {
$mySQLQuery = "SELECT currentPoints, gainedPoints, spendedPoints FROM users WHERE id = '$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
$Result = mysqli_fetch_array($Result);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgUserGetPointsSuccess;
$objectJSON->CurrentPoints = $Result['currentPoints'];
$objectJSON->GainedPoints = $Result['gainedPoints'];
$objectJSON->SpendedPoints = $Result['spendedPoints'];
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminBanUserSuccess", "You have banned this user !");
define("MsgAdminBanUserBanned", "This user is already banned !");
define("MsgAdminBanUserWrongID", "There is no user with this ID !");
define("MsgAdminBanUserEmptyField", "To ban user, you must fill in the field !");
// Developer Messages
define("MsgAdminBanUserEmptyPOST", "You must give adminID, adminToken and userID using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminGetUsersSuccess", "We have found the users !");
// Developer Messages
define("MsgAdminGetUsersEmptyPOST", "You must give adminID and adminToken using POST method !");
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/8/2017
* License: Apache License 2.0
*/
/**
 * Send an eMail through the configured SMTP server
 * @param $toMail
 * @param $toName
 * @param $titleMail
 * @param $contentMail
 */
function customSentMail($toMail, $toName, $titleMail, $contentMail)
{
    $mail = new PHPMailer;
    // Keep SMTP debug output off so it can not corrupt the JSON response
    $mail->SMTPDebug = 0;
    $mail->isSMTP();
    $mail->SMTPOptions = array(
        'ssl' => array(
            'verify_peer' => false,
            'verify_peer_name' => false,
            'allow_self_signed' => true
        )
    );
    $mail->Host = SMTPHost;
    $mail->SMTPAuth = true;
    $mail->Username = SMTPUsername;
    $mail->Password = SMTPPassword;
    $mail->SMTPSecure = 'tls';
    $mail->Port = SMTPPort;
    $mail->setFrom(sysMail, sysName);
    $mail->addAddress($toMail, $toName);
    $mail->Subject = $titleMail;
    $mail->Body = $contentMail;
    // Log the result instead of echoing, so API endpoints keep returning clean JSON
    if (!$mail->send()) {
        error_log('Message could not be sent. Mailer Error: ' . $mail->ErrorInfo);
    }
}
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
require_once("register.php");
require_once("login.php");
require_once("recovery.php");
require_once("changePassword.php");<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Main get user process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function getUserProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidTokenWithID($requestData->userID, $requestData->userToken, $mySQLConnection)) {
$mySQLQuery = "SELECT firstName, lastName, userName, phone, currentPoints, gainedPoints, spendedPoints, status, creationDate FROM users WHERE ID = '$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
$Result = mysqli_fetch_array($Result);
        $objectJSON->Status = SUCCESS;
        $objectJSON->Message = MsgUserGetUserSuccess;
        // Initialise the nested objects explicitly (implicit creation from an empty value is deprecated in PHP)
        $objectJSON->User = new stdClass();
        $objectJSON->User->firstName = $Result['firstName'];
        $objectJSON->User->lastName = $Result['lastName'];
        $objectJSON->User->phone = $Result['phone'];
        $objectJSON->User->currentPoints = $Result['currentPoints'];
        $objectJSON->User->gainedPoints = $Result['gainedPoints'];
        $objectJSON->User->spendedPoints = $Result['spendedPoints'];
        $objectJSON->User->status = $Result['status'];
        $objectJSON->User->creationDate = $Result['creationDate'];
        $mySQLQuery = "SELECT * FROM history WHERE userID = '$requestData->userID'";
        /** @noinspection PhpUndefinedMethodInspection */
        $Result = $mySQLConnection->query($mySQLQuery);
        $objectJSON->User->History = array();
        $index = 0;
        /** @noinspection PhpUndefinedMethodInspection */
        while ( $rows = $Result->fetch_assoc() ) {
            $record = new stdClass();
            $record->OldPoints = $rows['oldPoints'];
            $record->NewPoints = $rows['newPoints'];
            $record->Action = $rows['action'];
            $record->Timestamp = $rows['timeStamp'];
            $objectJSON->User->History[$index] = $record;
            $index++;
        }
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
return $objectJSON;
}
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
/**
* Check if user can interact with API
* If user has 2 or less unsuccessful attempts, he can.
* If user has 3 unsuccessful attempts and 1 hour has not passed, he can't.
* If user has 3 unsuccessful attempts and 1 hour has passed, he can.
* @param $eMail
* @param $mySQLConnection
* @return bool
*/
function userCanInteract($eMail, $mySQLConnection)
{
if (getUnsuccessfulAttempts($eMail, $mySQLConnection) < 3) {
return true;
} else {
$mySQLQuery = "SELECT unlockDate < NOW() AS 'isUnlocked' FROM users WHERE eMail = '$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
$Result = mysqli_fetch_array($Result);
if ($Result['isUnlocked'] == 1) {
resetUnsuccessfulAttempts($eMail, $mySQLConnection);
return true;
}
}
return false;
}
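/*
 * Illustrative sketch (not part of the original extension): how the lockout
 * helpers in this file combine around a login attempt. exampleBruteForceFlow()
 * is a hypothetical helper for documentation only; it is defined but never
 * called here.
 */
function exampleBruteForceFlow($eMail, $passwordIsCorrect, $mySQLConnection)
{
    if (!userCanInteract($eMail, $mySQLConnection)) {
        return false; // 3 failed logins and the 1 hour lock has not expired yet
    }
    if ($passwordIsCorrect) {
        resetUnsuccessfulAttempts($eMail, $mySQLConnection); // clear the counter
    } else {
        addUnsuccessfulAttempt($eMail, $mySQLConnection); // count it; the 3rd one locks for 1 hour
    }
    return true;
}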
/**
* Get user's unsuccessful login attempts
* This is a brute-force security
* @param $eMail
* @param $mySQLConnection
* @return mixed
*/
function getUnsuccessfulAttempts($eMail, $mySQLConnection)
{
$mySQLQuery = "SELECT loginAttempts FROM users WHERE eMail = '$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
$Result = mysqli_fetch_array($Result);
return $Result['loginAttempts'];
}
/**
 * Add an unsuccessful attempt to user
 * If user has 2 (+ this) unsuccessful attempts, he can't try to connect for 1 hour
* This is a brute-force security
* @param $eMail
* @param $mySQLConnection
*/
function addUnsuccessfulAttempt($eMail, $mySQLConnection)
{
$attempts = getUnsuccessfulAttempts($eMail, $mySQLConnection) + 1;
$mySQLQuery = "UPDATE users SET loginAttempts='$attempts' WHERE eMail='$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
if ($attempts >= 3) {
$mySQLQuery = "UPDATE users SET status='locked', unlockDate=NOW()+INTERVAL 1 HOUR WHERE eMail='$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
}
}
/**
* Reset the unsuccessful attempts of user to 0
* This is a brute-force security
* @param $eMail
* @param $mySQLConnection
*/
function resetUnsuccessfulAttempts($eMail, $mySQLConnection)
{
$mySQLQuery = "UPDATE users SET status='enabled', loginAttempts='0', unlockDate=NOW() WHERE eMail='$eMail'";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/7/2017
* License: Apache License 2.0
*/
/**
* Set Content-type as json and charset as utf-8
*/
function enableJSON()
{
header('Content-type: application/json;charset=utf-8;');
}
/**
* Create and return the standard JSON object
* @return stdClass
*/
function getStdJSON()
{
return new stdClass();
}
/**
* Print passed JSON's object content on page
* @param $jsonObject
*/
function printJSON($jsonObject)
{
echo json_encode($jsonObject, JSON_PRETTY_PRINT | JSON_UNESCAPED_UNICODE);
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Add a record for this register process on registerlog table
* @param $username
* @param $eMail
* @param $attemptResult
* @param $mySQLConnection
*/
function addRegisterRecord($username, $eMail, $attemptResult, $mySQLConnection)
{
$clientIP = $_SERVER['REMOTE_ADDR'];
$mySQLQuery = "INSERT INTO registerlog (ip, username, eMail, attemptResult) VALUES ('$clientIP', '$username', '$eMail', '$attemptResult')";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
}
/**
* Main register process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function registerProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidUsername($requestData->username)) {
if (isValidMail($requestData->eMail)) {
if (isValidPassword($requestData->password)) {
if (!isUsernameUsed($requestData->username, $mySQLConnection)) {
if (!isMailUsed($requestData->eMail, $mySQLConnection)) {
if (isPassWordEqual($requestData->password, $requestData->passwordValidate)) {
$passwordCrypt = createPasswordCrypt($requestData->password);
$tokenCrypt = createTokenCrypt($requestData->eMail, $requestData->password);
$mySQLQuery = "INSERT INTO users (firstName, lastName, username, eMail, password, passwordSalt, token) VALUES ('$requestData->firstName', '$requestData->lastName', '$requestData->username', '$requestData->eMail', '$passwordCrypt->encryptedPassword', '$passwordCrypt->passwordSalt', '$tokenCrypt->hashedToken')";
/** @noinspection PhpUndefinedMethodInspection */
$mySQLConnection->query($mySQLQuery);
$objectJSON->Status = SUCCESS;
$objectJSON->Token = $tokenCrypt->token;
$objectJSON->Message = MsgAuthRegisterSuccess;
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthRegisterPasswordNotEqual;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthRegisterMailUsed;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAuthRegisterUsernameUsed;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidPassword;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidMail;
}
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgInvalidUsername;
}
addRegisterRecord($requestData->username, $requestData->eMail, $objectJSON->Status, $mySQLConnection);
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Main add user process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function addUserProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidTokenWithID($requestData->adminID, $requestData->adminToken, $mySQLConnection)) {
if (isAdminWithID($requestData->adminID, $mySQLConnection)) {
// For security reasons, set adminID and adminToken as md5 of null
$requestData->adminID = md5("null");
$requestData->adminToken = md5("null");
$objectJSON = registerProcess($requestData, $mySQLConnection, $objectJSON);
if ($objectJSON->Status == SUCCESS) {
$objectJSON->Message = MsgAdminAddUserSuccess;
}
} else {
$objectJSON->Status = FORBIDDEN;
$objectJSON->Message = MsgForbidden;
}
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// User Messages
define("MsgUserGetPointsSuccess", "We have found your points !");
// Developer Messages
define("MsgUserGetPointsEmptyPOST", "You must give userID and userToken using POST method !");<file_sep># pointSystemAPI
A Simple Point System for Shops !
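# Example request
A minimal sketch of calling the API, assuming the project is served at http://localhost/pointSystemAPI (the base URL and the credentials below are placeholders, not part of the repository):

    <?php
    // POST eMail and password to the v1 login endpoint and decode the JSON reply
    $curl = curl_init("http://localhost/pointSystemAPI/v1/auth/login/index.php");
    curl_setopt($curl, CURLOPT_POST, true);
    curl_setopt($curl, CURLOPT_POSTFIELDS, http_build_query(array(
        "eMail" => "user@example.com",
        "password" => "myS3cretPass"
    )));
    curl_setopt($curl, CURLOPT_RETURNTRANSFER, true);
    $response = json_decode(curl_exec($curl)); // ->Status, ->Message and ->Token on success
    curl_close($curl);

On success the endpoint returns Status "SUCCESS" together with the authentication token used by the other services.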
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 3/7/2017
* License: Apache License 2.0
*/
// General Status Messages
define("DB_ERROR", "DB_ERROR");
define("METHOD_NOT_ALLOWED", "METHOD_NOT_ALLOWED");
define("INVALID_REQUEST", "INVALID_REQUEST");
define("NOT_IMPLEMENTED", "NOT_IMPLEMENTED");
define("EMPTY_FIELDS", "EMPTY_FIELDS");
define("INVALID_FIELDS", "INVALID_FIELDS");
define("UNAUTHORIZED", "UNAUTHORIZED");
define("FORBIDDEN", "FORBIDDEN");
define("NO_ACTION", "NO_ACTION");
define("SUCCESS", "SUCCESS");
/**
* Include the right language file based on passed param
* @param $language
*/
function enableLanguage($language = "en")
{
switch ($language) {
default: {
require_once("en/en.php");
break;
}
}
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 6/7/2017
* License: Apache License 2.0
*/
require_once ("../pointsManager.php");
/**
 * Main get user process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function getUserProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isIDUsed($requestData->userID, $mySQLConnection)) {
$mySQLQuery = "SELECT * FROM users";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
$Result = mysqli_fetch_array($Result);
$objectJSON->Status = SUCCESS;
$objectJSON->Message = MsgAdminGetUserSuccess;
$objectJSON->id = $Result['id'];
$objectJSON->firstName = $Result['firstName'];
$objectJSON->lastName = $Result['lastName'];
$objectJSON->username = $Result['username'];
$objectJSON->eMail = $Result['eMail'];
$objectJSON->phone = $Result['phone'];
$objectJSON->currentPoints = $Result['currentPoints'];
$objectJSON->gainedPoints = $Result['gainedPoints'];
$objectJSON->spendedPoints = $Result['spendedPoints'];
$objectJSON->role = $Result['role'];
$objectJSON->loginAttempts = $Result['loginAttempts'];
$objectJSON->status = $Result['status'];
$objectJSON->creationDate = $Result['creationDate'];
$objectJSON->unlockDate = $Result['unlockDate'];
} else {
$objectJSON->Status = INVALID_FIELDS;
$objectJSON->Message = MsgAdminGetUserWrongID;
}
return $objectJSON;
}<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
/**
* Main get history process
* @param $requestData
* @param $mySQLConnection
* @param $objectJSON
* @return mixed
*/
function getHistoryProcess($requestData, $mySQLConnection, $objectJSON)
{
if (isValidTokenWithID($requestData->userID, $requestData->userToken, $mySQLConnection)) {
$mySQLQuery = "SELECT * FROM history WHERE userID = '$requestData->userID'";
/** @noinspection PhpUndefinedMethodInspection */
$Result = $mySQLConnection->query($mySQLQuery);
        $objectJSON->Status = SUCCESS;
        $objectJSON->Message = MsgViewHistorySuccess;
        $objectJSON->History = array();
        $index = 0;
        /** @noinspection PhpUndefinedMethodInspection */
        while ($rows = $Result->fetch_assoc()) {
            $record = new stdClass();
            $record->OldPoints = $rows['oldPoints'];
            $record->NewPoints = $rows['newPoints'];
            $record->Action = $rows['action'];
            $record->Timestamp = $rows['timeStamp'];
            $objectJSON->History[$index] = $record;
            $index++;
        }
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
return $objectJSON;
}
<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 5/7/2017
* License: Apache License 2.0
*/
require_once("../../../config.php");
require_once ("core.php");
if(isPostMethod($_SERVER['REQUEST_METHOD'])) {
if (isset($_POST['adminID'], $_POST['adminToken'], $_POST['userID'])) {
if (isPostValuesEmpty($_POST)) {
$objectJSON->Status = EMPTY_FIELDS;
$objectJSON->Message = MsgAdminBanUserEmptyField;
} else {
$requestData = new stdClass();
$requestData->adminID = $_POST['adminID'];
$requestData->adminToken = $_POST['adminToken'];
$requestData->userID = $_POST['userID'];
if (isValidTokenWithID($requestData->adminID, $requestData->adminToken, $mySQLConnection)) {
if (isAdminWithID($requestData->adminID, $mySQLConnection)) {
$objectJSON = banUserProcess($requestData, $mySQLConnection, $objectJSON);
} else {
$objectJSON->Status = FORBIDDEN;
$objectJSON->Message = MsgForbidden;
}
} else {
$objectJSON->Status = UNAUTHORIZED;
$objectJSON->Message = MsgUnauthorized;
}
}
} else {
$objectJSON->Status = INVALID_REQUEST;
$objectJSON->Message = MsgAdminBanUserEmptyPOST;
}
} else {
$objectJSON->Status = METHOD_NOT_ALLOWED;
$objectJSON->Message = MsgMethodNotAllowed;
}
closeMySQLConnection($mySQLConnection);
printJSON($objectJSON);<file_sep><?php
/**
* Owner: JNK Software
* Developer: <NAME>
* Date: 2/8/2017
* License: Apache License 2.0
*/
// Admin Messages
define("MsgAdminAddUserSuccess", "You have added this user !");
define("MsgAdminAddUserMailUsed", "There is already a user with this eMail !");
define("MsgAdminAddUserUsernameUsed", "There is already a user with this Username !");
define("MsgAdminAddUserInvalidMail", "This is not a valid eMail !");
define("MsgAdminAddUserInvalidPassword", "This is not a valid Password !");
define("MsgAdminAddUserInvalidUsername", "This is not a valid Username !");
define("MsgAdminAddUserEmptyFields", "To add user, you must fill in all fields !");
// Developer Messages
define("MsgAdminAddUserEmptyPOST", "You must give adminID, adminToken, firstName, lastName, username, eMail, password and passwordValidate using POST method !");
| d05147bfd4915f772c6b0462cd9104139151e1d1 | [
"Markdown",
"PHP"
] | 49 | Markdown | JNKSoftware/pointSystemAPI | 3fbc3844e20488484319b30f2b18611d11133e05 | 7f7b09bd8b47748ed42494d5112885344fceed0b |
refs/heads/master | <file_sep>My own NixOS packages, seperate from the fork.
<file_sep>{ stdenv, fetchurl, lua5, webkitgtk2, libunique, sqlite, pkgconfig, gtk, libsoup, git, lua5_filesystem, glib_networking, gsettings_desktop_schemas, makeWrapper, help2man }:
stdenv.mkDerivation rec {
name = "luakit";
version = "2012.09.13-r1";
src = fetchurl {
url = "https://github.com/mason-larobina/${name}/archive/${version}.tar.gz";
sha256 = "067g3bp5w20jypc8rx54dpbn3ysbgxzchgpq7sld5yh2z36s1l52";
};
buildInputs = [ lua5 webkitgtk2 libunique sqlite pkgconfig gtk libsoup git makeWrapper gsettings_desktop_schemas help2man ];
postPatch = ''
sed -i -e "s/DESTDIR/INSTALLDIR/" ./Makefile
sed -i -e "s|/etc/xdg/luakit/|$out/etc/xdg/luakit/|" lib/lousy/util.lua
patchShebangs ./build-utils
'';
buildPhase = ''
make DEVELOPMENT_PATHS=0 PREFIX=$out
'';
installPhase = ''
make DEVELOPMENT_PATHS=0 PREFIX=$out install
wrapProgram "$out/bin/luakit" \
--prefix GIO_EXTRA_MODULES : "${glib_networking}/lib/gio/modules" \
--prefix XDG_DATA_DIRS : "$GSETTINGS_SCHEMAS_PATH:$out/share" \
--prefix XDG_DATA_DIRS : "$out/usr/share/" \
--prefix XDG_DATA_DIRS : "$out/share/" \
--prefix XDG_CONFIG_DIRS : "$out/etc/xdg" \
--prefix LUA_PATH ";" "$out/share/luakit/lib/?/init.lua" \
--prefix LUA_PATH ";" "$out/share/luakit/lib/?.lua" \
--prefix LUA_CPATH ";" "${lua5_filesystem}/lib/lua/${lua5.luaversion}/?.so" \
--prefix LUA_CPATH ";" "${lua5}/lib/lua/${lua5.luaversion}/?.so"
'';
meta = {
homepage = "http://mason-larobina.github.io/luakit/";
description = "Fast, small, webkit based browser framework extensible by Lua";
platforms = stdenv.lib.platforms.linux;
maintainers = [ stdenv.lib.maintainers.meisternu ];
};
}
<file_sep>{ stdenv, fetchurl, SDL2, freetype }:
stdenv.mkDerivation {
name = "SDL2_ttf-2.0.12";
src = fetchurl {
url = http://www.libsdl.org/projects/SDL_ttf/release/SDL2_ttf-2.0.12.tar.gz;
sha256 = "8728605443ea1cca5cad501dc34dc0cb15135d1e575551da6d151d213d356f6e";
};
buildInputs = [SDL2 freetype];
postInstall = "ln -s $out/include/SDL/SDL_ttf.h $out/include/";
meta = {
description = "SDL TrueType library";
};
}
<file_sep>{ stdenv, fetchurl, buildPerlPackage }:
buildPerlPackage rec {
name = "Archive-Extract-0.72";
src = fetchurl {
url = "mirror://cpan/authors/id/B/BI/BINGOS/${name}.tar.gz";
sha256 = "e86acd52e75a7cc3cb602a3b84c831cb408067b4ad2d7fb7ba122d156fd745df";
};
meta = {
homepage = http://search.cpan.org/dist/Archive-Extract/;
description = "Archiving tools for perl";
license = "perl";
platforms = stdenv.lib.platforms.linux;
};
}
| 70c4932d7e641146fb0037bee35eb075e2c314e2 | [
"Markdown",
"Nix"
] | 4 | Markdown | meisternu/mynix | 544e76b01651c83d0c8ca81d684a81ffa0a1ea20 | 54bafa101f6198da987dc501fb13cf6a78be096e |
refs/heads/master | <repo_name>SeeThruHead/neutrino-dll<file_sep>/test.js
module.exports = function() {
console.log('test butts');
};
| baf041185a8e9871efb4f9cda0e99a9811cd5922 | [
"JavaScript"
] | 1 | JavaScript | SeeThruHead/neutrino-dll | ab5485d1a95583ff2cc1af06eded438031e83b94 | f5dab90ea16305af5f1d4b53d6163baddb52d51b |
refs/heads/master | <repo_name>Ameer255/objects-practice-tasks<file_sep>/README.md
# objects-practice-tasks
# Link for advance level tasks :
https://github.com/Ameer255/objects-practice-tasks-advance
# Level basic and intermediate
// #LEVEL-1 Access methods
// Q1.
let arr = [5, 10, 15];
// Accessing third element
console.log(arr[2]);
// Q2.
let obj = {
name: "Maimoona",
degree: "MBBS"
}
// Accessing Degree property
console.log(obj.degree);
// Q3.
let arr2 = [1, 2, 3, 4, 5, 6, 7];
// Accessing each element of array using for loop
for (let i = 0; i < arr2.length; i++) {
console.log(arr2[i]);
}
// Q4.
let obj2 = {
name: "Maimoona",
degree: "MBBS",
age: 25
};
for (const prop in obj2) {
console.log(prop + " : "+ obj2[prop]);
}
// LEVEL # INTERMEDIATE
// Students names and hobbies data
let students1 = [
{
name: "Amna",
hobbies: ["eating", "cooking"]
},
{
name: "Daniyal",
hobbies: ["arts", "shopping"]
},
{
name: "Fahad",
hobbies: ["coding", "cooking"]
},
{
name: "Hajra",
hobbies: ["sleep", "reading"]
}
];
// Printing each student's data
students1.forEach((student) => {
console.log("Hobbies of " + student.name);
//using nested forEach for hobbies array
student.hobbies.forEach((hobby, index) => {
console.log((index + 1) + ". " + hobby);
});
});
// Extensive students data
let students = [
{
name: "Amna",
gender: "f",
dob: new Date("02-04-1990"),
address: {
ilaqa: "Gulistan-e-Johar",
city: "Karachi",
country: "Pakistan",
postalCode: 47114
},
phoneNo: "0331-2324243",
admissionTestScore: 56,
hasInternet: true,
hasComputer: false,
hasJob: true,
hasSchoolBefore: false
},
{
name: "Hadia",
gender: "f",
dob: new Date("05-15-1984"),
address: {
ilaqa: "Lyari",
city: "Karachi",
country: "Pakistan",
postalCode: 75660
},
phoneNo: "0345-3452953",
admissionTestScore: 48,
hasInternet: false,
hasComputer: false,
hasJob: false,
hasSchoolBefore: true
},
{
name: "Ahmed",
gender: "m",
dob: new Date("06-27-2002"),
address: {
ilaqa: "University Road",
city: "Quetta",
country: "Pakistan",
postalCode: 82215
},
phoneNo: "0333-0124325",
admissionTestScore: 33,
hasInternet: true,
hasComputer: false,
hasJob: false,
hasSchoolBefore: false
},
{
name: "Fariha",
gender: "f",
dob: new Date("09-13-1998"),
address: {
ilaqa: "University Road",
city: "Karachi",
country: "Pakistan",
postalCode: 82215
},
phoneNo: "0331-9432532",
admissionTestScore: 33,
hasInternet: true,
hasComputer: false,
hasJob: false,
hasSchoolBefore: false
},
{
name: "Abdullah",
gender: "m",
dob: new Date("01-24-1972"),
address: {
ilaqa: "Bazar Colony",
city: "Lahore",
country: "Pakistan",
postalCode: 32212
},
phoneNo: "0345-9912121",
admissionTestScore: 33,
hasInternet: false,
hasComputer: false,
hasJob: true,
hasSchoolBefore: true
}
];
// printing each student's data
students.forEach((student) => {
console.log("Name : " + student.name);
if (student.gender === "f") {
console.log("Gender : Female");
}
else {
console.log("Gender : Male");
}
console.log("City : " + student.address.city);
console.log("Score : " + student.admissionTestScore + " marks");
if (student.admissionTestScore >= 50) {
console.log("Passed\n");
}
else {
console.log("Failed\n");
}
});
let femaleStudents = new Array();
let maleStudents = new Array();
let passStudents = new Array();
let eligibleStudents = new Array();
let studentsAddress = new Array();
let ufoneStudents = new Array();
let groupA = new Array();
let groupB = new Array();
let ageOfStudents = new Array();
let olderStudents = new Array();
let oldestAge = 0;
students.forEach((student) => {
// printing the names of female students only
if (student.gender === "f") {
femaleStudents.push(student.name);
}
else {
// printing the names of male students only
maleStudents.push(student.name);
}
// printing the names of students who have passed test
if (student.admissionTestScore >= 50) {
passStudents.push(student.name)
}
// printing the names of eligible students
if (student.address.city === "Karachi" && student.hasInternet) {
eligibleStudents.push(student.name);
}
// printing student's Address
studentsAddress.push(student.name + "'s address :\n" + student.address.ilaqa + " in " + student.address.city + ", " + student.address.country + "\n");
let num = parseInt(student.phoneNo)
// printing names of students who have ufone
if (num >= 330 && num < 340) {
ufoneStudents.push(student.name + "\n" + student.phoneNo);
}
// Printing the names of students in Group A, and in Group B
if (student.hasJob || student.hasSchoolBefore) {
groupB.push(student.name)
}
else {
groupA.push(student.name);
}
let currentYear = new Date().getYear();
// Printing age of each student
ageOfStudents.push(student.name + "'s age is " + (currentYear - student.dob.getYear()));
    // finding the oldest student (oldestAge is declared outside the loop so it persists across iterations)
    let currentAge = (currentYear - student.dob.getYear());
    if (currentAge > oldestAge) {
        oldestAge = currentAge;
        olderStudents[0] = student.name + " is oldest, age : " + currentAge;
    }
});
let derivedData = [
{
title: "Female students",
data: femaleStudents
},
{
title: "Male students",
data: maleStudents
},
{
title: "Passed students",
data: passStudents
},
{
title: "Eligible students",
data: eligibleStudents
},
{
title: "Students Address",
data: studentsAddress
},
{
title: "Ufone students",
data: ufoneStudents
},
{
title: "Group A",
data: groupA
},
{
title: "Group B",
data: groupB
},
{
title: "Students Ages",
data: ageOfStudents
},
{
title: "Oldest student",
data: olderStudents
},
];
derivedData.forEach((data) => {
console.log(data.title);
data.data.forEach((info) => {
console.log(info);
});
});
// VIDEOS DATA TASK
let videos = [
{
title: "Photoshop tutorial",
lengthInMinutes: 70,
category: "Education",
uploadDate: new Date("08-14-2020"),
tags: "design, digital, photoshop, creativity",
features: ["Live", "360°", "HDR"],
viewCount: 4700,
rating: 4.8
},
{
title: "Episode # 01 - The Best Comedy Show",
lengthInMinutes: 2,
category: "Entertainment",
uploadDate: new Date("07-03-2019"),
tags: "comedy, funny",
features: ["Subtitles/CC", "3D", "HD"],
viewCount: 145615,
rating: 4.1
},
{
title: "How to use FOR EACH loop - tutorial by <NAME>",
lengthInMinutes: 25,
category: "Education",
uploadDate: new Date("11-10-2018"),
tags: "javascript, loops, web development",
features: ["Purchased", "HD"],
viewCount: 9004,
rating: 4.3
}
];
// Printing each video data
videos.forEach((video) => {
console.log(`Title : ${video.title} `);
console.log(`Length : ${video.lengthInMinutes} `);
console.log(`Category : ${video.category} `);
console.log(`Views : ${video.viewCount} `);
console.log(`Uploaded On : ${video.uploadDate.getDate()}-${new Intl.DateTimeFormat('en-US', {month: "short"}).format(video.uploadDate)}-${video.uploadDate.getFullYear()} `);
console.log(`Rating : ${video.rating} `);
});
// Printing short videos only
console.log("\nShort videos");
videos.forEach((video) => {
if (video.lengthInMinutes < 3) {
console.log(video.title);
}
});
// Printing long videos only
console.log("\n Long videos");
videos.forEach((video) => {
if (video.lengthInMinutes > 20) {
console.log(video.title);
}
});
// Printing longest videos only
let longestLength = 0;
let videoTitle = " ";
console.log("\nLongest video");
videos.forEach((video) => {
if (video.lengthInMinutes > longestLength) {
longestLength = video.lengthInMinutes;
videoTitle = video.title;
}
});
console.log(videoTitle);
// Printing educational videos only
console.log("\nEducational videos");
videos.forEach((video) => {
if (video.category === "Education") {
console.log(video.title);
}
});
// Printing videos which contain tag "javascript"
console.log("\nVideos with JavaScript tag");
videos.forEach((video) => {
if (video.tags.includes("javascript")) {
console.log(video.title);
}
});
// Printing videos which contain feature "HD"
console.log("\n Videos with HD feature");
videos.forEach((video) => {
for (let j = 0; j < video.features.length; j++) {
if (video.features[j] === "HD") {
console.log(video.title);
}
}
});
// Printing videos which are uploaded today
console.log("\n today Uploaded videos ");
let todayDate = new Date("08-14-2020");
videos.forEach((video) => {
let videoDate = video.uploadDate;
if (videoDate.getMonth() == todayDate.getMonth() && videoDate.getDay() === todayDate.getDay() && videoDate.getYear() === todayDate.getYear()) {
console.log(video.title);
}
});
// Printing videos which are uploaded this month
console.log("\n Videos uploaded this month ");
videos.forEach((video) => {
let videoDate = video.uploadDate;
if (videoDate.getMonth() == todayDate.getMonth() && videoDate.getYear() === todayDate.getYear()) {
console.log(video.title);
}
});
// Printing videos which are uploaded this year
console.log("\nVideos uploaded this year ");
videos.forEach((video) => {
let videoDate = video.uploadDate;
if (videoDate.getMonth() == todayDate.getMonth() && videoDate.getYear() === todayDate.getYear()) {
console.log(video.title);
}
});
// Sorting videos by viewCount
function sortByViews(video) {
for (let i = 0; i < video.length - 1; i++) {
if (video[i].viewCount > video[i + 1].viewCount) {
let temp = video[i];
video[i] = video[i + 1];
video[i + 1] = temp;
}
}
}
sortByViews(videos);
console.log("\n Sorted by Views");
videos.forEach((video) => {
console.log(video.title+ "\n Views : "+ video.viewCount);
});
// Sorting videos by ratings
function sortByRatings(video) {
for (let i = 0; i < video.length - 1; i++) {
if (video[i].rating > video[i + 1].rating) {
let temp = video[i];
video[i] = video[i + 1];
video[i + 1] = temp;
}
}
}
sortByRatings(videos);
console.log("\n Sorted by Ratings");
videos.forEach((video) => {
console.log(video.title);
});
| 6e901e89cdc06ac753325b798e8ba3bd00924b0b | [
"Markdown"
] | 1 | Markdown | Ameer255/objects-practice-tasks | c50e8304f77376b699ab02728d207a1935621f20 | 7af5d460d7cb44c24cd33280622b3c510eaadd1c |
refs/heads/master | <repo_name>Sang555/sql<file_sep>/dql.sql
select * from blog;
select * from blog where blog_id=001;
select * from category;
select cat_name from category where blog_id=1;
select * from comments;
select author from comments where text="Good!" AND pid=201;
select * from post;
update post set created_date="1997-06-10" where pid in(201,202,203);
update blog set blog_url="xyz.com" where blog_url="abc.com";
delete from blog where blog_id=3;
select * from tag;
update tag set tag_name="Tag5" where tag_id=404;
select * from post where sub_cat_id=101;
select * from comments where pid=201 OR author="keshu";
select post.title, comments.text from post inner join comments on post.pid=comments.pid;
select post.title, comments.text from post left join comments on post.pid=comments.pid;
select post.title, comments.text from post right join comments on post.pid=comments.pid;
select category.cat_id, post.title, comments.text from category left join post on category.sub_cat_id=post.sub_cat_id inner join comments on post.pid=comments.pid;
select post.pid, comments.text from post inner join comments on post.pid=comments.pid order by post.pid desc;
select count(pid),pid from comments group by pid;
select count(pid) as c,pid from comments group by pid having c>1;
<file_sep>/dml.sql
alter table post add column created_date date;
alter table category add column sub_cat_id int NOT NULL;
alter table category add constraint cpk foreign key (sub_cat_id) references category(cat_id) ;
alter table post add column sub_cat_id int NOT NULL;
alter table post add constraint psk foreign key (sub_cat_id) references category(sub_cat_id) ;
alter table blog change blog_name blog_name varchar(20);
insert into blog values (001, " My first blog" , "abc.com");
insert into blog values (002, " My second blog" , "def.com");
insert into blog values (003, " My first blog" , "ghi.com");
insert into category(cat_id, cat_name,blog_id) values (101, " Food ", 001, 101);
insert into category(cat_id, cat_name,blog_id) values (102, " Sea Food ", 001 , 101);
insert into category(cat_id, cat_name,blog_id) values (103, " travel and tours ", 002 , 103);
insert into category(cat_id, cat_name,blog_id) values (104, " pilgrimage tours", 002 , 103);
insert into post(pid,title,author,created_date,sub_cat_id) values (201, " Best ", "sang", 21/7/16, 101);
insert into post(pid,title,author,created_date,sub_cat_id) values (202, " Next ", "keshu", 21/7/16, 101);
insert into post(pid,title,author,created_date,sub_cat_id) values (203, " Suggested ", "keshu", 21/7/16, 101);
insert into comments(cid,author,text,pid) values (301,"sang", " Good! ", 201);
insert into comments(cid,author,text,pid) values (302, "keshu", " Wow! ", 201 );
insert into comments(cid,author,text,pid) values (303, "tash", " Bad! ", 202 );
insert into comments(cid,author,text,pid) values (304, " shiv", "nice!" , 203);
insert into tag(tag_id, tag_name) values (401, "Tag1");
insert into tag(tag_id, tag_name) values (402, "Tag2");
insert into tag(tag_id, tag_name) values (403, "Tag3");
insert into tag(tag_id, tag_name) values (404, "Tag4");
insert into prod_tag(tag_id, pid) values (401, 201);
insert into prod_tag(tag_id, pid) values (402, 201);
insert into prod_tag(tag_id, pid) values (403, 202);
insert into prod_tag(tag_id, pid) values (401, 202);
insert into prod_tag(tag_id, pid) values (403, 201);
insert into prod_tag(tag_id, pid) values (401, 203);
insert into prod_tag(tag_id, pid) values (402, 203);
insert into prod_tag(tag_id, pid) values (404, 203);<file_sep>/ddls.sql
create database blog_db;
use blog_db;
CREATE TABLE `blog` (
`blog_id` int(11) NOT NULL DEFAULT '0',
`blog_name` varchar(10) DEFAULT NULL,
`blog_url` varchar(10) DEFAULT NULL,
PRIMARY KEY (`blog_id`)
) ;
CREATE TABLE `category` (
`cat_id` int(11) NOT NULL,
`cat_name` varchar(10) DEFAULT NULL,
`blog_id` int(11) DEFAULT NULL,
PRIMARY KEY (`cat_id`),
KEY `blog_id` (`blog_id`),
CONSTRAINT `category_ibfk_1` FOREIGN KEY (`blog_id`) REFERENCES `blog` (`blog_id`)
);
CREATE TABLE `comments` (
`cid` int(11) NOT NULL,
`author` varchar(10) DEFAULT NULL,
`text` varchar(30) DEFAULT NULL,
`pid` int(11) DEFAULT NULL,
PRIMARY KEY (`cid`),
KEY `pid` (`pid`),
CONSTRAINT `comments_ibfk_1` FOREIGN KEY (`pid`) REFERENCES `post` (`pid`)
);
CREATE TABLE `blog` (
`blog_id` int(11) NOT NULL DEFAULT '0',
`blog_name` varchar(10) DEFAULT NULL,
`blog_url` varchar(10) DEFAULT NULL,
PRIMARY KEY (`blog_id`)
);
CREATE TABLE `post` (
`pid` int(11) NOT NULL,
`title` varchar(10) NOT NULL,
`author` varchar(10) NOT NULL,
`sub_cat_id` int(11) DEFAULT NULL,
PRIMARY KEY (`pid`),
KEY `fpk` (`sub_cat_id`),
CONSTRAINT `fpk` FOREIGN KEY (`sub_cat_id`) REFERENCES `sub_cat` (`sub_cat_id`)
);
CREATE TABLE `prod_tag` (
`tag_id` int(11) NOT NULL DEFAULT '0',
`pid` int(11) NOT NULL DEFAULT '0',
PRIMARY KEY (`pid`,`tag_id`),
KEY `qpk` (`tag_id`),
CONSTRAINT `qpk` FOREIGN KEY (`tag_id`) REFERENCES `tag` (`tag_id`),
CONSTRAINT `wpk` FOREIGN KEY (`pid`) REFERENCES `post` (`pid`)
);
CREATE TABLE `sub_cat` (
`sub_cat_id` int(11) NOT NULL,
`name` varchar(10) NOT NULL,
`cat_id` int(11) DEFAULT NULL,
PRIMARY KEY (`sub_cat_id`),
KEY `cpk` (`cat_id`),
CONSTRAINT `cpk` FOREIGN KEY (`cat_id`) REFERENCES `category` (`cat_id`)
);
CREATE TABLE `tag` (
`tag_id` int(11) NOT NULL,
`tag_name` varchar(20) DEFAULT NULL,
PRIMARY KEY (`tag_id`)
); | 42159046ee9c3e1b0b984a8d001619c2010400a3 | [
"SQL"
] | 3 | SQL | Sang555/sql | 78ed3fdfec090782e95a33e961ad04dffbda9b06 | 4e76b4846e8b0b57ea9e1870c384d045818de953 |
refs/heads/master | <repo_name>tobenski/nytaar.v2<file_sep>/resources/js/views/Order.vue
<template>
<div class="flex items-center justify-center py-4 px-2">
<div class="max-w-sm md:max-w-md lg:max-w-lg rounded overflow-hidden shadow-2xl">
<div class="flex flex-wrap bg-gray-100">
<div class="w-1/2 pr-2 pb-2">
<img class="w-full h-full rounded shadow-lg" src="https://madenimitliv.dk/wp-content/uploads/2018/02/DSC_0030-1024x788.jpg" alt="Lækker mad">
</div>
<div class="w-1/2 pl-2 pb-2">
<img class="w-full h-full rounded shadow-lg" src="https://bt.bmcdn.dk/media/cache/resolve/image_1240/image/3/36601/9127320-junk.jpg" alt="Lækker mad">
</div>
<div class="w-1/2 pr-2 pt-2">
<img class="w-full h-full rounded shadow-lg" src="https://media-cdn.tripadvisor.com/media/photo-s/0f/6b/d9/a6/laekker-mad.jpg" alt="Lækker mad">
</div>
<div class="w-1/2 pl-2 pt-2">
<img class="w-full h-full rounded shadow-lg" src="https://media.madetmere.dk/wp-content/uploads/2015/10/Mad-med-omtanke-4.jpg" alt="Lækker mad">
</div>
</div>
<form id="order" @submit.prevent="handleSubmit" class="w-full pt-6">
<div v-show="this.current === 1" class="md:flex md:items-center mb-6">
<div class="md:w-2/3">
<label class="block text-gray-500 font-bold md:text-right mb-1 md:mb-0 pr-4" for="adults">4 retters Nytårsmenu á 399,-</label>
</div>
<div class="md:w-1/3">
<input
class="bg-gray-200 appearance-none border-2 border-gray-200 rounded w-20 py-2 px-4 text-gray-700 leading-tight focus:outline-none focus:bg-white focus:border-purple-500"
type="number"
name="adults"
id="adults"
v-model="adults"
min="1"
max="50"
>
</div>
</div>
<div v-show="this.current === 1" class="md:flex md:items-center mb-6">
<div class="md:w-2/3">
<label class="block text-gray-500 font-bold md:text-right mb-1 md:mb-0 pr-4" for="children">Børnelasagne á 69,-</label>
</div>
<div class="md:w-1/3">
<input
class="bg-gray-200 appearance-none border-2 border-gray-200 rounded w-20 py-2 px-4 text-gray-700 leading-tight
focus:outline-none focus:bg-white focus:border-purple-500"
type="number"
name="children"
id="children"
v-model="children"
min="1"
max="50"
>
</div>
</div>
<div v-show="this.current === 1" class="md:flex md:items-center mb-6">
<div class="md:w-2/3">
<label class="block text-gray-500 font-bold md:text-right mb-1 md:mb-0 pr-4" for="total">I alt: </label>
</div>
<div class="md:w-1/3">
<input
class="bg-gray-200 appearance-none border-2 border-gray-200 rounded w-32 py-2 px-4 text-gray-700 leading-tight focus:outline-none focus:bg-white focus:border-purple-500"
type="number"
name="total"
id="total"
v-model="total"
readonly
>
</div>
</div>
<div v-show="this.current === 2">
<label for="name"><NAME>:</label>
<input type="text" name="name" id="name" v-model="name">
<label for="email">E-mail:</label>
<input type="email" name="email" id="email" v-model="email">
<label for="confirm_email">Gentag E-mail:</label>
<input type="email" name="confirm_email" id="confirm_email" v-model="confirm_email">
<label for="phone">Telefon nummer:</label>
<input type="number" name="phone" id="phone" v-model="phone">
</div>
<div class="px-6 pt-4 pb-4" v-show="isLast()">
<div class="text-right">
<button type="submit" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full">
Bestil
</button>
</div>
</div>
</form>
<div class="px-6 pt-4 pb-2" v-show="!isLast()">
<div>
<button @click="navigatePrevious" v-show="!isFirst() && !isLast()" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full">
Tilbage
</button>
<button @click="navigateNext" v-show="!isLast()" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full">
Næste
</button>
</div>
</div>
</div>
</div>
</template>
<script>
import { mapGetters } from 'vuex';
import FirstStep from './order/FirstStep';
import SecondStep from './order/SecondStep';
import ThirdStep from './order/ThirdStep';
import FourthStep from './order/FourthStep';
export default {
name: "Order",
components: {
FirstStep,
SecondStep,
ThirdStep,
FourthStep,
},
data (){
return {
component: FirstStep,
last: 3,
current: 1,
adults: '2',
children: '0',
name: '',
email: '',
confirm_email: '',
phone: '',
}
},
mounted() { // Bruges ikke
this.$store.dispatch('fetchAuthUser');
},
computed: { // Hent probs fra Store
total: {
get() {
return (this.adults * 399) + (this.children * 69);
}
},
},
methods: {
isFirst() {
return this.component === FirstStep
},
isLast() {
return this.current === this.last
},
navigateNext() {
this.current++;
},
navigatePrevious() { // ændre som Next
this.current--;
},
handleSubmit:function() {
alert('submitted to the backend!'+this.adults);
},
}
}
</script>
<style>
</style><file_sep>/resources/js/router.js
import Vue from 'vue';
import VueRouter from 'vue-router';
import Start from './views/Start';
import Born from './views/Born';
import Order from './views/Order';
import FirstStep from './views/order/FirstStep';
import SecondStep from './views/order/SecondStep';
import ThirdStep from './views/order/ThirdStep';
import FourthStep from './views/order/FourthStep';
Vue.use(VueRouter);
export default new VueRouter({
mode: 'history',
routes: [
{
path: '/', name: 'home', component: Start,
},
{
path: '/born', name: 'born', component: Born,
},
{
path: '/order', name: 'order', component: Order,
},
{
path: '/order/details', name: 'order.details', component: SecondStep,
},
{
path: '/order/confirm', name: 'order.confirm', component: ThirdStep,
},
{
path: '/order/payment', name: 'order.payment', component: FourthStep,
},
]
});<file_sep>/resources/js/views/order/SecondStep.vue
<template>
<div>
<label for="name"><NAME>:</label>
<input type="text" name="name" id="name" v-model="name">
<label for="email">E-mail:</label>
<input type="email" name="email" id="email" v-model="email">
<label for="confirm_email">Gentag E-mail:</label>
<input type="email" name="confirm_email" id="confirm_email" v-model="confirm_email">
<label for="phone">Telefon nummer:</label>
<input type="number" name="phone" id="phone" v-model="phone">
</div>
</template>
<script>
import { mapGetters } from 'vuex';
export default {
name: "SecondStep",
data() {
return {
name: '',
email: '',
confirm_email: '',
phone: '',
}
},
computed: {
...mapGetters({
children: 'children',
adults: 'adults',
})
},
}
</script>
<style>
</style><file_sep>/resources/js/store/modules/order.js
const state = {
adults: '',
children: '',
};
const getters = {
adults: state => {
return state.adults;
},
children: state => {
return state.children;
},
};
const actions = {
};
const mutations = {
setAdults(state, adults) {
state.adults = adults;
},
setChilds(state, children) {
state.children = children;
},
};
export default {
state, getters, actions, mutations,
}<file_sep>/resources/js/views/order/ThirdStep.vue
<template>
<div>
3
<label for="adults">4 retters Nytårsmenu á 399,-</label>
<input type="number" name="adults" id="adults">
<label for="childs">Børne Lasagne á 69,-</label>
<input type="number" name="childs" id="childs">
</div>
</template>
<script>
import { mapGetters } from 'vuex';
export default {
name: "ThirdStep",
computed: {
...mapGetters({
children: 'children',
adults: 'adults',
})
},
}
</script>
<style>
</style><file_sep>/resources/js/components/Navigation.vue
<template>
<nav class="flex flex-col md:flex-row items-center justify-around bg-gray-200 p-4 w-full">
<div class="flex items-center justify-center text-gray-800 md:w-1/3 h-full w-full text-3xl font-semibold font-header tracking-tight">
<NAME>
</div>
<div class="flex items-center justify-center text-gray-800 md:w-1/3 h-full w-full text-2xl font-semibold font-header tracking-tight">
Nytårs Menu 2020
</div>
<div class="lg:w-1/3 hidden lg:flex"></div>
</nav>
</template>
<script>
export default {
name: "Navigation",
}
</script>
<style>
</style><file_sep>/resources/js/views/Born.vue
<template>
<Born />
</template>
<script>
import MenuCard from '../components/MenuCard';
export default {
name: "Born",
components: {
MenuCard,
}
}
</script>
<style>
</style><file_sep>/resources/js/views/order/FirstStep.vue
<template>
<div>
<label for="adults">4 retters Nytårsmenu á 399,-</label>
<input type="number" name="adults" id="adults" v-model="adults">
<label for="childs">Børne Lasagne á 69,-</label>
<input type="number" name="childs" id="childs" v-model="children">
</div>
</template>
<script>
import { mapGetters } from 'vuex';
import Navigation from './Navigation';
export default {
name: "FirstStep",
components: {
Navigation,
},
data() {
return {
adults: '',
children: '',
}
},
/*
computed: {
...mapGetters({
children: 'children',
adults: 'adults',
})
},
*/
}
</script>
<style>
</style><file_sep>/resources/js/components/MenuCard.vue
<template>
<div class="max-w-sm md:max-w-md lg:max-w-lg rounded overflow-hidden shadow-2xl">
<div class="flex flex-wrap bg-gray-100">
<div class="w-1/2 pr-2 pb-2">
<img class="w-full h-full rounded shadow-lg" src="https://madenimitliv.dk/wp-content/uploads/2018/02/DSC_0030-1024x788.jpg" alt="Lækker mad">
</div>
<div class="w-1/2 pl-2 pb-2">
<img class="w-full h-full rounded shadow-lg" src="https://bt.bmcdn.dk/media/cache/resolve/image_1240/image/3/36601/9127320-junk.jpg" alt="Lækker mad">
</div>
<div class="w-1/2 pr-2 pt-2">
<img class="w-full h-full rounded shadow-lg" src="https://media-cdn.tripadvisor.com/media/photo-s/0f/6b/d9/a6/laekker-mad.jpg" alt="Lækker mad">
</div>
<div class="w-1/2 pl-2 pt-2">
<img class="w-full h-full rounded shadow-lg" src="https://media.madetmere.dk/wp-content/uploads/2015/10/Mad-med-omtanke-4.jpg" alt="Lækker mad">
</div>
</div>
<div class="px-6 py-4">
<div class="font-bold text-xl mb-2">4 retters Nytårs Menu</div>
<p class="text-gray-700 text-base mb-2">
<span class="font-bold">Jordskokke suppe & Kammusling</span><br />
Cremet suppe af jordskok med ristet kammusling, urteolie & brødcroutons.
</p>
<p class="text-gray-700 text-base mb-2">
<span class="font-bold">Røget dyrekølle</span><br />
Røget dyrekølle med creme af blå kornblomst fra Thise mejeri, syltede rødløg & rugchips.
</p>
<p class="text-gray-700 text-base mb-2">
<span class="font-bold">Tournedoes af Dansk Kalvemørbrad</span><br />
Tournedoes af Dansk Kalvemørbrad serveret med pommes Anna, Timan sauce & variation af gulerod.
</p>
<p class="text-gray-700 text-base mb-2">
<span class="font-bold">Lime/Lakrids cheescake</span><br />
Cheesecake toppet med lime & lakridscreme, pyntet med physalis & mango coulis.
</p>
<p class="text-gray-700 text-base mb-2">
<span class="font-bold">Pris per kuvert 399,-</span>
</p>
</div>
<div class="px-6 pt-4 pb-2">
<router-link to="/order" class="inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2">Bestil</router-link>
<router-link to="/" class="inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2">4 retters Menu</router-link>
<router-link to="/born" class="inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2">Børne Menu</router-link>
</div>
</div>
</template>
<script>
export default {
name: "MenuCard",
}
</script>
<style>
</style><file_sep>/resources/js/views/Start.vue
<template>
<div class="flex items-center justify-center py-4 px-2">
<MenuCard />
</div>
</template>
<script>
import MenuCard from '../components/MenuCard';
export default {
name: "Start",
components: {
MenuCard
}
}
</script>
<style>
</style><file_sep>/resources/js/views/order/Navigation.vue
<template>
<div>
<button @click="navigatePrevious" v-show="!isFirst()" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full">
Tilbage
</button>
<button @click="navigateNext" class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-full">
{{ nextButtonText() }}
</button>
</div>
</template>
<script>
export default {
name: "Navigation",
props: [
'swapComponent',
],
methods: {
nextButtonText() {
if (this.$route.name === 'order.fourthStep')
{
return 'Bestil';
}
return 'Næste';
},
isFirst() {
return this.$route.name === 'order'
},
navigateNext() {
if (this.$route.name === 'order.payment') {
submit();
} else if (this.$route.name === 'order.confirm') {
this.$router.push('/order/payment');
} else if (this.$route.name === 'order.details') {
this.$router.push('/order/confirm');
} else if (this.$route.name === 'order') {
//this.$router.push('/order/details');
swapComponent("SecondStep");
}
},
navigatePrevious() {
if (this.$route.name === 'order.payment') {
this.$router.push('/order/confirm');
} else if (this.$route.name === 'order.confirm') {
this.$router.push('/order/details');
} else if (this.$route.name === 'order.details') {
this.$router.push('/order');
}
},
submit() {
alert('submitted to the backend!');
},
}
}
</script>
<style>
</style> | daaaaa22463db6af14c7a7bcbf6fd8f16ae3d905 | [
"Vue",
"JavaScript"
] | 11 | Vue | tobenski/nytaar.v2 | 8208f94daada53e560b5dc0fbe8b2bf85736c30b | f32fcb570761564082468e1238faa66da54a8401 |
refs/heads/master | <file_sep>{% if show %}
<h1> Data found </h1>
{% for obj in show %}
{{obj}} {{obj.email}} <br>
{% endfor %}
{% else %}
<h> data not found </h>
{% endif %}
<file_sep>from django.db import models
class d1(models.Model):
username=models.CharField(max_length=122)
password=models.CharField(max_length=122)
# image=models.ImageField(max_length=122)
# mobile=models.CharField(max_length=122)
class d2(models.Model):
Address=models.CharField(max_length=122)
Street_name=models.CharField(max_length=122)
Near_location=models.CharField(max_length=122)
# image=models.ImageField(max_length=122)
#class d3(models.Model):
#Address=models.CharField(max_length=122)
# Street_name=models.CharField(max_length=122)
# Near_location=models.CharField(max_length=122)
# ImageField=models.ImageField(upload_to="c1/c2")
# Create your models here.
<file_sep># Generated by Django 3.1.7 on 2021-10-17 12:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0003_d2'),
]
operations = [
migrations.CreateModel(
name='d3',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Address', models.CharField(max_length=122)),
('Street_name', models.CharField(max_length=122)),
('Near_location', models.CharField(max_length=122)),
],
),
]
<file_sep>from django.shortcuts import render,redirect
from django import forms
from .forms import f1,f2
from .models import d1,d2
from django.contrib.auth.models import User
#def home(request):
##### return render(request,'home.html')
def show(request):
sp=User.objects.all()
print(sp)
return render(request,'show.html',{'show':sp})
def register(request):
formz=f1((request.POST))
if(request.method=='POST'):
if formz.is_valid():
username=formz.cleaned_data['username']
password=<PASSWORD>z.cleaned_data['<PASSWORD>']
#address=formz.cleaned_data['address']
user = User.objects.create_user(username,password)
return redirect('/accounts/login')
else:
return render(request,'register.html',{'errors':formz.errors})
else:
return render(request,'register.html',{'show':formz})
def update(request,id):
sk=d2.objects.get(id=id)
formz=f2(request.POST)
if formz.is_valid():
sk.Address=formz.cleaned_data['Address']
sk.Street_name=formz.cleaned_data['Street_name']
sk.Near_location=formz.cleaned_data['Near_location']
sk.save()
print("form submitted")
return redirect('/register/addresses')
else:
print('form not submitted',f1.errors)
return render(request,'edit.html',{'data':sk})
def edit(request,id):
s2=d2.objects.get(id=id)
return render(request,'edit.html',{'s2':s2})
def no(request):
formz=f2((request.POST))
if(request.method=='POST'):
if formz.is_valid():
Address=formz.cleaned_data['Address']
Street_name=formz.cleaned_data['Street_name']
Near_location=formz.cleaned_data['Near_location']
db=d2(Address=Address,Street_name=Street_name,Near_location=Near_location)
db.save()
return redirect('/register/no')
else:
return render(request,'no.html',{'error1':formz.errors})
else:
return render(request,'no.html',{'open':f2})
def slides(request):
formz=f2((request.POST))
if(request.method=='POST'):
if formz.is_valid():
Address=formz.cleaned_data['Address']
Street_name=formz.cleaned_data['Street_name']
Near_location=formz.cleaned_data['Near_location']
db=d2(Address=Address,Street_name=Street_name,Near_location=Near_location)
db.save()
return redirect('/register/no')
else:
return render(request,'slides.html',{'error1':formz.errors})
else:
return render(request,'slides.html',{'open':f2})
def lappy(request):
formz=f2((request.POST))
if(request.method=='POST'):
if formz.is_valid():
Address=formz.cleaned_data['Address']
Street_name=formz.cleaned_data['Street_name']
Near_location=formz.cleaned_data['Near_location']
db=d2(Address=Address,Street_name=Street_name,Near_location=Near_location)
db.save()
return redirect('/register/lappy')
else:
return render(request,'lappy.html.html',{'error1':formz.errors})
else:
return render(request,'lappy.html',{'open':f2})
def addresses(request):
add=d2.objects.all()
return render(request,'addresses.html',{'show1':add})
def cart(request):
sk=d2.objects.all()
return render(request,'cart.html',{'cart':cart})
def delete(request,id):
sps=d2.objects.get(id=id)
sps.delete()
return redirect('/register/addresses')
# Create your views here.
<file_sep>from django.contrib import admin
from .models import d1
admin.site.register(d1)
from .models import d2
admin.site.register(d2)
# Register your models here.
<file_sep>from django import forms
class f1(forms.Form):
username=forms.CharField()
password=<PASSWORD>()
#mobile=forms.CharField()
class f2(forms.Form):
Address=forms.CharField(max_length=122)
Street_name=forms.CharField(max_length=122)
Near_location=forms.CharField(max_length=122)
# image=forms.ImageField(max_length=122)
#mobile=forms.CharField(max_length=122)<file_sep># Generated by Django 3.1.7 on 2021-10-16 17:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customer', '0002_auto_20211015_2001'),
]
operations = [
migrations.CreateModel(
name='d2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Address', models.CharField(max_length=122)),
('Street_name', models.CharField(max_length=122)),
('Near_location', models.CharField(max_length=122)),
],
),
]
<file_sep># Generated by Django 3.1.7 on 2021-10-16 07:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='d1',
old_name='address',
new_name='password',
),
migrations.RenameField(
model_name='d1',
old_name='mobile',
new_name='username',
),
migrations.RemoveField(
model_name='d1',
name='name',
),
]
<file_sep><link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="<KEY>" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.1/dist/js/bootstrap.bundle.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
{% extends 'base.html' %}
{% block subheading5 %}
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="<KEY>" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.1/dist/js/bootstrap.bundle.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<div style="border: 8px solid;;">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<table> <tr>
<td> <div id="carouselExampleIndicators" class="carousel slide" data-ride="carousel">
<ol class="carousel-indicators">
<li data-target="#carouselExampleIndicators" data-slide-to="0" class="active"></li>
<li data-target="#carouselExampleIndicators" data-slide-to="1"></li>
<li data-target="#carouselExampleIndicators" data-slide-to="2"></li>
</ol>
<div class="carousel-inner">
<div class="carousel-item active">
<img class="d-block w-450" height="400" src="/static/download (23)apple laptop.jpg" alt="First slide">
<div class="carousel-caption d-none d-md-block">
<h5>My Caption Title (1st Image)</h5>
<p>The whole caption will only show up if the screen is at least medium size.</p>
</div>
</div>
<div class="carousel-item">
<img class="d-block w-450" height="400" src="/static/download (23)apple1.jpg" alt="Second slide">
</div>
<div class="carousel-item">
<img class="d-block w-450" height="400" src="/static/download (23)apple2.jpg" alt="Third slide">
</div>
</div>
<a class="carousel-control-prev" href="/static/download (23)apple3.jpg" role="button" data-slide="prev">
<span class="carousel-control-prev-icon" aria-hidden="true"></span>
<span class="sr-only">Previous</span>
</a>
<a class="carousel-control-next" href="#carouselExampleIndicators" role="button" data-slide="next">
<span class="carousel-control-next-icon" aria-hidden="true"></span>
<span class="sr-only">Next</span>
</a>
</td>
</div>
<td>
<div id="carouselExampleIndicators" class="carousel slide" data-ride="carousel">
<ol class="carousel-indicators">
<li data-target="#carouselExampleIndicators" data-slide-to="0" class="active"></li>
<li data-target="#carouselExampleIndicators" data-slide-to="1"></li>
<li data-target="#carouselExampleIndicators" data-slide-to="2"></li>
</ol>
<div class="carousel-inner">
<div class="carousel-item active">
<img class="d-block w-450" height="400" src="/static/a1.jpg" alt="First slide">
<div class="carousel-caption d-none d-md-block">
<h5>My Caption Title (1st Image)</h5>
<p>The whole caption will only show up if the screen is at least medium size.</p>
</div>
</div>
<div class="carousel-item">
<img class="d-block w-450" height="400" src="/static/a2.jpg" alt="Second slide">
</div>
<div class="carousel-item">
<img class="d-block w-450" height="400" src="/static/a3.jpg" alt="Third slide">
</div>
</div>
<a class="carousel-control-prev" href="/static/a4.jpg" role="button" data-slide="prev">
<span class="carousel-control-prev-icon" aria-hidden="true"></span>
<span class="sr-only">Previous</span>
</a>
<a class="carousel-control-next" href="#carouselExampleIndicators" role="button" data-slide="next">
<span class="carousel-control-next-icon" aria-hidden="true"></span>
<span class="sr-only">Next</span>
</a>
</td>
<td>
<div id="carouselExampleIndicators" class="carousel slide" data-ride="carousel">
<ol class="carousel-indicators">
<li data-target="#carouselExampleIndicators" data-slide-to="0" class="active"></li>
<li data-target="#carouselExampleIndicators" data-slide-to="1"></li>
<li data-target="#carouselExampleIndicators" data-slide-to="2"></li>
</ol>
<div class="carousel-inner">
<div class="carousel-item active">
<img class="d-block w-450" height="400" src="/static/a4.jpg" alt="First slide">
<div class="carousel-caption d-none d-md-block">
<h5>My Caption Title (1st Image)</h5>
<p>The whole caption will only show up if the screen is at least medium size.</p>
</div>
</div>
<div class="carousel-item">
<img class="d-block w-450" height="400" src="/static/a3.jpg" alt="Second slide">
</div>
<div class="carousel-item">
<img class="d-block w-450" height="400" src="/static/a1.jpg" alt="Third slide">
</div>
</div>
<a class="carousel-control-prev" href="/static/a2.jpg" role="button" data-slide="prev">
<span class="carousel-control-prev-icon" aria-hidden="true"></span>
<span class="sr-only">Previous</span>
</a>
<a class="carousel-control-next" href="#carouselExampleIndicators" role="button" data-slide="next">
<span class="carousel-control-next-icon" aria-hidden="true"></span>
<span class="sr-only">Next</span>
</a>
</td>
</div>
</table>
<table>
<tr> Price₹92900*₹117900*
Display
Retina display
33.74 cm / 13.3-inch (diagonal) LED-backlit display with IPS technology; 2560x1600 native resolution at 227 pixels per inch with support for millions of colours
Supported scaled resolutions:
1680x1050
1440x900
1024x640
400 nits brightness
Wide colour (P3)
True Tone technology
ChipApple M1 chip
8-core CPU with 4 performance cores and 4 efficiency cores
7-core GPU
16-core Neural Engine
Apple M1 chip
8-core CPU with 4 performance cores and 4 efficiency cores
8-core GPU
16-core Neural Engine
Battery and Power1
Up to 15 hours wireless web
Up to 18 hours Apple TV app movie playback
Built-in 49.9-watt-hour lithium‑polymer battery
30W USB-C Power Adapter
Charging and Expansion
Two Thunderbolt / USB 4 ports with support for:
Charging
DisplayPort
Thunderbolt 3 (up to 40 Gbps)
USB 4 (up to 40Gbps)
USB 3.1 Gen 2 (up to 10 Gbps)
</ul>
</tr>
</table>
<center>
{{user}}
<br>
<input type="radio" name="pay on delivery"> pay on delivery
<input type="radio" name="pay on delivery"> online transaction
<img src="/static/cash on delivery.jpg" width="200">
<form method="post">
{% csrf_token %}
{{open}}
{{error1}}
<input type="submit" value="add">
<a href="cart"> <img src="/static/download (19).jpg" width="130"> </a>
<br>
<br>
</center>
<div style="border: 9px solid;">
</div>
</form>
<center>
<h3> For other transactions</h3>
<select name="card" name="name">
<option value="card"> -->Select card type--> </option>
<option value="debit card"> Debit card </option>
<option value="credit card"> credit card</option>
<option value="master card"> Master card</option>
<option value="visa"> Visa </option> </td>
</select>
<img src="/static/download (10).jpg" width="130">
Enter date:- <input type="datetime" name="date">
</center>
<center>
Exp date:- <input type="date" name="date">
</center>
<center>
<a href="/register/no"> Submit </a>
</center>
<center>
<h3> digital transactions </h3>
<a href="https://www.netbanking.com"> Netbanking</a>
<br>
<a href="https://www.paypal.com"> paypal </a>
<br>
<a href="https://www.googlepay"> google pay</a>
<br>
<a href="https://www.phonepay"> phone pay</a>
</center>
{% endblock subheading5 %}<file_sep>from django.urls import path
from . import views
urlpatterns = [
path('register',views.register,name='register'),
# path('home', views.home,name='home'),
path('show', views.show,name='show'),
#path('/new1', views.new1,name='new1'),
path('no', views.no ,name='no'),
path('addresses', views.addresses,name="addresses"),
path('slides', views.slides, name="slides"),
path('cart', views.cart, name="cart"),
# path('delete/<str:id>',views.delete, name='delete'),
path('delete/<int:id>',views.delete,name='delete'),
path('edit/<int:id>', views.edit, name='edit'),
path('update/<int:id>', views.update, name='update'),
path('lappy', views.lappy, name="lappy"),
]<file_sep># Generated by Django 3.1.7 on 2021-10-18 06:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer', '0004_d3'),
]
operations = [
migrations.DeleteModel(
name='d3',
),
]
| 6f4435d6f831d98c10ab64057b73595fcd89982b | [
"HTML",
"Python"
] | 11 | HTML | nikhil3804/flipkart_clone | dc80e1bb06c66e1c552fff2fed99e3f2240d5768 | 0fedab1e7b070e540df31191a757af6e3e61fc1a |
refs/heads/master | <file_sep># pro 23
output link
https://gaurimakker.github.io/pro-23-final/
| 4bddbd85586b70b1996027be53b53046065f19ec | [
"Markdown"
] | 1 | Markdown | gaurimakker/pro-23-final | 47cc5536ea14fe6f815a64d6cf926805c2845cce | 834b6fe8f2b23f40d88402ce7ff73ea555cd773f |
refs/heads/main | <repo_name>tberkery/Baltimore-City-Voting-Clusters<file_sep>/README.md
# Baltimore-City-Voting-Clusters
Applying a cluster analysis to data from the 2016 Presidential (General) Election for Baltimore City. Observing how precincts can be grouped based on number of Republicans, number of Democrats, and voter turnout rate.
# Data Source
The dataset behind this analysis is from the [Baltimore City Government Website](https://elections.maryland.gov/elections/2016/index.html). Specifically, it is the dataset titled "Statewide by Party and Precinct (CSV)" on that webpage. The Excel workbook version of this dataset in this GitHub repository can be accessed [here](https://github.com/tberkery/Baltimore-City-Voting-Clusters/blob/main/Raw%20Data:%20Official%20by%20Party%20and%20Precinct).
# Statement of Background
The 2016 Election involved the same candidates across every precinct and Baltimore and every precinct nationwide. However, the specific dynamics of the election varied quite a bit across different precincts. Precincts, which combine to form a district and tend to span part or all of a neighborhood, each have different numbers of registered Republicans, different numbers of registered Democrats, and different voter turnout rates. This largely makes sense: different neighborhoods tend to have people of different upbringings, backgrounds, socioeconomic status, and priorities, all of which play a role in political preferences and likelihood to be involved in the political process. Thus, as a means of comparing the similarity of the dynamics of the 2016 election in a given precinct to other Baltimore City precincts, a cluster analysis will of residents' party affiliations and degree of involvement in elections will be performed.
# Business Question
How can we group Baltimore City precincts into clusters based on party affiliation, population, and voter turnout using data from the 2016 presidential election?
# Data Interpretation
For context, consider the following map of Baltimore precincts. The goal here is not to fixate on what precinct is where but rather to understand the approximate composition of precincts and how precincts come in all shapes and sizes.
**Map of Baltimore City Precincts**
![alt text](https://github.com/tberkery/Baltimore-City-Voting-Clusters/blob/main/Visual%20of%20Baltimore%20City%20Precincts.jpg)
Background diagram courtesy of [Baltimore City Government Website](http://boe.baltimorecity.gov/sites/default/files/CouncilDistricts_WardsPrecincts_tabloid-2012_1.pdf).
With these precincts as context, using a three cluster analysis, the three clusters are as follows (colored in in the prior visualization):
Precinct 006-005 = blue = east of the Inner Harbor = near McElderly Park/Linwood
Precinct 006-001 = green = east of the Inner Harbor = near McElderly Park/Linwood
Precinct 013-002 = purple = slightly north and west of Johns Hopkins University = Hampden
When it comes to the characteristics of each cluster, the first cluster (Precinct 006-005) appears to be highly populated (it has more of both democrats and republicans than average) and to have above average voter turnout. From an elections perspective, a district that has lots of people on both sides that gets a higher-than-average amount to turn out is likely a desirable outcome. From the perspective of people taking charge of the politicians who represent them, this cluster is largely the gold standard. Other precincts closely aligned with this cluster include the Inner Harbor and Downtown Baltimore.
The second cluster (precinct 006-001) is characterized by having both less democrats and replubicans than average (indicating a lower-than-average amount of party-affiliated voters) and lower-than-average voter turnout rates. This is an area that activists of both parties would likely seek out with the goal of encouraging increased involvement with elections. Other precincts closely aligned with this cluster include Harwood and Abel (two neighborhoods not too far east of Hopkins).
Lastly, the third cluster (precinct 013-002) is characterized by having fewer Democrats than average and more Republicans than average along with a higher-than-average voter turnout rate. This corresponds to the Hampden neighborhood, and other neighborhoods closely aligned with this cluster include Fells Point. The neighborhoods included in this third cluster tend to be among Baltimore's more affluent and developed neighborhoods. Considering nationwide demographic trends, it isn't surprising to see that this cluster is characterized by increased support of Republican candidates and increased voter turnout.
So, in relation to the overall business question, when grouping Baltimore City precincts into three clusters by party affiliation, population, and voter turnout, the clusters basically take the following forms: (1) high number of party-registered individuals and a high voter turnout rate, (2) a low number of party-registered individuals and a low voter turnout rate, and (3) high number of Republicans and high turnout rate.
**Map of Baltimore City Precincts Colored by Cluster**
![alt text](https://github.com/tberkery/Baltimore-City-Voting-Clusters/blob/main/Baltimore%20Clusters%20Map.jpg)
Background diagram courtesy of [Baltimore City Government Website](http://boe.baltimorecity.gov/sites/default/files/CouncilDistricts_WardsPrecincts_tabloid-2012_1.pdf).
This visual shows which anchor cluster every Baltimore City precinct maps to, with blue mapping to the first cluster, green to the second, and purple to the third. While there is naturally a lot of variety, there are some patterns.
The second cluster (low amounts of registered Democrats and Republicans and low voter turnout) characterizes the majority of the western and eastern parts of central Baltimore and nearly all o fBaltimore surrounding and south of Interstate 95. With some exceptions, these areas tend to be less economically advantaged. This cluster analysis suggests that there is reason to believe there may be a relationship between economic resources and election participation. For example, perhaps precincts falling into the second cluster have low numbers of registered Democrats and Republicans and low voter turnout because these neighborhoods tend to be disadvantaged economically and lack the work flexibility or transportation to make it to the polls on eleciton day. Exploring these factors further by incorporating them into the cluster analysis would make it possible to better understand the underlying reasons behind the number of party-affiliated individuals for the Democrat and Republican parties and what truly drives voter turnout.
Moreover, this visual shows that very metropolitan, trendy areas like the Inner Harbor, Fells Point, and Hampden tend to have higher-than-average numbers of Republicans and higher-than-average voter turnout. Given that this is largely the oppposite of the areas of the first cluster indicated in green in terms of skewing towards people with lots of economic resources, this similarly provides evidence that there may be a strong relationship between economic resources and election participation.
Lastly, we have the areas in blue corresponding to the first cluster, which are more sporadic with the exception of frequently occurring in Northern Baltimore. It's harder to generalize and see a clear pattern for these areas, but they correspond to the desirable outcome of lots of Republicans and Democrats and higher-than-average voter turnout (i.e. higher-than-average election participation).
When it comes to future research, I think it would make sense to include additional measures of economic equality, access to resources and transportation, and the demographics reflecting the diversity (or lack thereof) of each precinct. This would make it possible to delve deeper than generalizations and top-level knowledge of the Baltimore community when exploring the underlying reasons for the voting patterns unveiled in this cluster analysis.
# Instructions for Replicating Analysis
[See instructions here.](https://github.com/tberkery/Baltimore-City-Voting-Clusters/blob/main/Instructions%20for%20Replicating%20Analysis.pdf)
| 281d43c09994eae9ad694e1f4d9347b1586f0ad3 | [
"Markdown"
] | 1 | Markdown | tberkery/Baltimore-City-Voting-Clusters | e00d4cbf244097de5aa462433095fdc5b0d14b5c | 9fd986b5814ff470cb985a414d162857fa11205c |
refs/heads/master | <file_sep>#include "./common/rebrick_config.h"
#include "cmocka.h"
static int setup(void**state){
unused(state);
fprintf(stdout,"**** %s ****\n",__FILE__);
return 0;
}
static int teardown(void **state){
unused(state);
return 0;
}
static void config_object_create_destroy_success(void **start){
unused(start);
rebrick_config_t *config=NULL;
int32_t result;
result=rebrick_config_new(&config);
assert_true(result>=0);
assert_non_null(config);
assert_string_equal(config->type_name,"rebrick_config_t");
rebrick_config_destroy(config);
}
static void config_object_listens_success(void **state){
    unused(state);
rebrick_config_t *config=NULL;
int32_t result;
result=rebrick_config_new(&config);
assert_true(result>=0);
assert_non_null(config);
    //the 9090 value here comes from the Makefile
assert_int_equal(config->listen_port,9090);
assert_int_equal(config->listen_family,REBRICK_IPV4_IPV6);
rebrick_config_destroy(config);
}
int test_rebrick_config(void) {
const struct CMUnitTest tests[] = {
cmocka_unit_test(config_object_create_destroy_success),
cmocka_unit_test(config_object_listens_success)
};
return cmocka_run_group_tests(tests, setup, teardown);
}
<file_sep>
#ifndef __REBRICK_HTTPSOCKET_H__
#define __REBRICK_HTTPSOCKET_H__
#include "rebrick_http.h"
struct rebrick_httpsocket;
/**
* @brief after a http header parsed, executes this callback
* @param socket, which socket
* @param header received header
* @param status, result of parsing, parsed successfully or error
*/
typedef void (*rebrick_on_http_header_received_callback_t)(struct rebrick_socket *socket, void *callback_data, rebrick_http_header_t *header);
/**
* @brief after header parsed finished, when body data starts to come,
* this callback trigger,this is a synonym
* @see rebrick_on_data_received_callback_t
*/
typedef rebrick_on_data_received_callback_t rebrick_on_http_body_received_callback_t;
/**
* @brief http socket structure
* allways executes callback when new data arrives
*
*/
public_ typedef struct rebrick_httpsocket
{
base_ssl_socket();
private_ rebrick_on_connection_accepted_callback_t override_override_on_connection_accepted;
private_ rebrick_on_connection_closed_callback_t override_override_on_connection_closed;
private_ rebrick_on_data_received_callback_t override_override_on_data_received;
private_ rebrick_on_data_sended_callback_t override_override_on_data_sended;
private_ rebrick_on_error_occured_callback_t override_override_on_error_occured;
private_ rebrick_on_http_header_received_callback_t on_http_header_received;
private_ rebrick_on_http_body_received_callback_t on_http_body_received;
private_ rebrick_tls_context_t *override_override_tls_context;
private_ void *override_override_callback_data;
public_ readonly_ rebrick_http_header_t *header;
private_ rebrick_buffer_t *tmp_buffer;
private_ int32_t is_header_parsed;
public_ size_t header_len;
public_ readonly_ size_t content_received_length;
struct{
struct phr_header headers[REBRICK_HTTP_MAX_HEADERS];
const char *method, *path;
int minor_version;
size_t method_len, path_len, num_headers;
int32_t status;
const char *status_msg;
size_t status_msg_len;
size_t pos;
}parsing_params;
} rebrick_httpsocket_t;
#define cast_to_http_socket(x) cast((x),rebrick_httpsocket_t*)
int32_t rebrick_httpsocket_new(rebrick_httpsocket_t **socket,rebrick_tls_context_t *tls,rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient,
rebrick_on_http_header_received_callback_t on_http_header_received,
rebrick_on_http_body_received_callback_t on_http_body_received);
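/*
 * A minimal client-side usage sketch (an illustrative assumption, not
 * library sample code): the callbacks (on_connected, on_closed, on_data,
 * on_sent, on_error, on_header, on_body) are hypothetical functions the
 * caller implements, tls is NULL for plain http, and passing 0 for
 * backlog_or_isclient to mean "client" is a guess from the parameter name.
 *
 *   rebrick_sockaddr_t addr;
 *   rebrick_util_ip_port_to_addr("127.0.0.1", "8080", &addr);
 *   rebrick_httpsocket_t *client = NULL;
 *   int32_t result = rebrick_httpsocket_new(&client, NULL, addr, NULL,
 *                                           on_connected, on_closed,
 *                                           on_data, on_sent, on_error,
 *                                           0, on_header, on_body);
 *   if (result < 0)
 *   {
 *       //creation failed, inspect the returned error code
 *   }
 */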
int32_t rebrick_httpsocket_init(rebrick_httpsocket_t *socket,rebrick_tls_context_t *tls,rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient,
rebrick_on_http_header_received_callback_t after_http_request_received,
rebrick_on_http_body_received_callback_t after_http_body_received,
rebrick_tcpsocket_create_client_t create_client);
int32_t rebrick_httpsocket_destroy(rebrick_httpsocket_t *socket);
int32_t rebrick_httpsocket_send(rebrick_httpsocket_t *socket, char *buffer, size_t len, rebrick_clean_func_t cleanfunc);
int32_t rebrick_httpsocket_reset(rebrick_httpsocket_t *socket);
#endif<file_sep>#include "rebrick_util.h"
static int is_random_initialized = 0; //if random has not been initialized yet, initialize it before first use
int rebrick_util_str_endswith(const char *domainname, const char *search)
{
if (domainname && search)
{
int len1 = strlen(domainname);
int len2 = strlen(search);
        //e.g. searching for www.google.com inside google.com: the search term is longer, so it cannot match
if (len1 < len2)
return 0;
if (strncmp(&domainname[len1 - len2], search, len2) == 0)
return 1;
}
return 0;
}
rebrick_linked_item_t *rebrick_util_linked_item_create(size_t len, rebrick_linked_item_t *previous)
{
rebrick_linked_item_t *item = new(rebrick_linked_item_t);
if (item == NULL)
return NULL;
fill_zero(item, sizeof(rebrick_linked_item_t));
strcpy(item->type_name, "rebrick_linked_item_t");
item->data = malloc(len);
if (item->data == NULL)
{
free(item);
return NULL;
}
item->len = len;
if (previous)
{
previous->next = item;
item->prev = previous;
}
return item;
}
/*
 * @brief deletes the item and all elements after it
 * @return the element before the deleted item
 */
rebrick_linked_item_t *rebrick_util_linked_item_destroy(rebrick_linked_item_t *item)
{
if (item == NULL)
return NULL;
rebrick_linked_item_t *previous = item->prev;
if (item->next)
{
rebrick_util_linked_item_destroy(item->next);
item->next = NULL;
}
if (item->data)
{
free(item->data);
item->data = NULL;
}
if (item->prev)
item->prev->next = NULL;
free(item);
return previous;
}
size_t rebrick_util_linked_item_count(const rebrick_linked_item_t *item)
{
size_t count = 0;
if (item == NULL)
return count;
do
{
count++;
} while ((item = item->next));
return count;
}
rebrick_linked_item_t *rebrick_util_linked_item_next(rebrick_linked_item_t *item, size_t count)
{
while (count && item)
{
item = item->next;
count--;
}
return item;
}
rebrick_linked_item_t *rebrick_util_linked_item_prev(rebrick_linked_item_t *item, size_t count)
{
while (count && item)
{
item = item->prev;
count--;
}
return item;
}
rebrick_linked_item_t *rebrick_util_linked_item_start(rebrick_linked_item_t *item)
{
while (item)
{
if (item->prev == NULL)
break;
item = item->prev;
}
return item;
}
rebrick_linked_item_t *rebrick_util_linked_item_end(rebrick_linked_item_t *item)
{
while (item)
{
if (item->next == NULL)
break;
item = item->next;
}
return item;
}
rebrick_linked_item_t *rebrick_util_create_linked_items(const char *str, const char *splitter)
{
char *split;
size_t len;
char *data;
char *saveptr;
rebrick_linked_item_t *start = NULL, *current = NULL, *temp;
if (str == NULL)
return NULL;
len = strlen(str) + 1;
if (len == 1)
return NULL;
data = malloc(len);
if (data == NULL)
return NULL;
strcpy(data, str);
split = strtok_r(data, splitter,&saveptr);
while (split)
{
len = strlen(split) + 1;
temp = rebrick_util_linked_item_create(len, current);
if (temp == NULL)
{
if (start != NULL)
rebrick_util_linked_item_destroy(start);
free(data);
return NULL;
}
strcpy((char *)temp->data, split);
temp->len = len;
if (start == NULL)
{
start = temp;
}
current = temp;
split = strtok_r(NULL, splitter,&saveptr);
}
free(data);
return start;
}
//0 success,1 error
int rebrick_util_join_linked_items(const rebrick_linked_item_t *list, const char *splitter, char *dest, size_t destlen)
{
size_t splitlength;
if (!list || !splitter || !dest || !destlen)
return 1;
fill_zero(dest, destlen);
splitlength = strlen(splitter);
destlen -= strlen((const char *)list->data);
if (destlen < 1)
return 1;
strcpy(dest, (const char *)list->data);
list = list->next;
while (list)
{
destlen -= strlen((const char *)list->data) + splitlength;
if (destlen < 1)
return 1;
strcat(dest, splitter);
strcat(dest, (const char *)list->data);
list = list->next;
}
return 0;
}
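/*
 * A short illustrative sketch of the split/join helpers above (variable
 * names are hypothetical; buffer sizing stays the caller's responsibility):
 *
 *   rebrick_linked_item_t *parts = rebrick_util_create_linked_items("a.b.c", ".");
 *   size_t count = rebrick_util_linked_item_count(parts); //3
 *   char joined[64];
 *   if (rebrick_util_join_linked_items(parts, "-", joined, sizeof(joined)) == 0)
 *   {
 *       //joined now holds "a-b-c"
 *   }
 *   rebrick_util_linked_item_destroy(parts);
 */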
void rebrick_util_str_tolower(char *str)
{
unsigned char *p = (unsigned char *)str;
for (; *p; p++)
*p = tolower(*p);
}
int64_t rebrick_util_micro_time()
{
struct timeval currentTime;
gettimeofday(¤tTime, NULL);
return currentTime.tv_sec * (int64_t)1e6 + currentTime.tv_usec;
}
//random
int rebrick_util_rand()
{
if (!is_random_initialized)
{
is_random_initialized = 1;
srand(time(NULL));
}
return rand();
}
char *rebrick_util_time_r(char * str){
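    //note: per POSIX, ctime_r writes up to 26 bytes, so str must point to a buffer of at least 26 bytes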
time_t current_time=time(NULL);
ctime_r(¤t_time,str);
//remove \n
str[strlen(str)-1]=0;
return str;
}
int32_t rebrick_util_addr_to_roksit_addr(const struct sockaddr *addr, rebrick_sockaddr_t *sock){
if(addr->sa_family==AF_INET){
memcpy(&sock->v4,addr,sizeof(struct sockaddr_in));
}
if(addr->sa_family==AF_INET6){
memcpy(&sock->v6,addr,sizeof(struct sockaddr_in6));
}
return REBRICK_SUCCESS;
}
int32_t rebrick_util_addr_to_ip_string(const rebrick_sockaddr_t *sock,char buffer[REBRICK_IP_STR_LEN]){
if(sock->base.sa_family==AF_INET){
uv_ip4_name(&sock->v4,buffer,16);
}
if(sock->base.sa_family==AF_INET6){
uv_ip6_name(&sock->v6,buffer,45);
}
return REBRICK_SUCCESS;
}
int32_t rebrick_util_addr_to_port_string(const rebrick_sockaddr_t *sock,char buffer[REBRICK_PORT_STR_LEN]){
if(sock->base.sa_family==AF_INET){
sprintf(buffer,"%d",ntohs(sock->v4.sin_port));
}
if(sock->base.sa_family==AF_INET6){
sprintf(buffer,"%d",ntohs(sock->v6.sin6_port));
}
return REBRICK_SUCCESS;
}
int32_t rebrick_util_to_socket(rebrick_sockaddr_t *sock, const char *ip,const char*port){
if (uv_ip6_addr(ip, atoi(port), cast(&sock->v6, struct sockaddr_in6 *)) < 0)
{
if (uv_ip4_addr(ip, atoi(port), cast(&sock->v4, struct sockaddr_in *)) < 0)
{
return REBRICK_ERR_BAD_IP_PORT_ARGUMENT;
}
}
return REBRICK_SUCCESS;
}
int32_t rebrick_util_ip_port_to_addr(const char *ip,const char*port,rebrick_sockaddr_t *sock){
fill_zero(sock,sizeof(rebrick_sockaddr_t));
if (uv_ip6_addr(ip, atoi(port), cast(&sock->v6, struct sockaddr_in6 *)) < 0)
{
if (uv_ip4_addr(ip, atoi(port), cast(&sock->v4, struct sockaddr_in *)) < 0)
{
return REBRICK_ERR_BAD_IP_PORT_ARGUMENT;
}
}
return REBRICK_SUCCESS;
}
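/*
 * Sketch of the address helpers above (IPv4 shown; IPv6 flows through the
 * same union in the same way):
 *
 *   rebrick_sockaddr_t addr;
 *   if (rebrick_util_ip_port_to_addr("127.0.0.1", "9090", &addr) == REBRICK_SUCCESS)
 *   {
 *       char ip[REBRICK_IP_STR_LEN] = {0};
 *       char port[REBRICK_PORT_STR_LEN] = {0};
 *       rebrick_util_addr_to_ip_string(&addr, ip);     //"127.0.0.1"
 *       rebrick_util_addr_to_port_string(&addr, port); //"9090"
 *   }
 */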
int32_t rebrick_util_file_read_allbytes(const char *file,char **buffer,size_t *len){
char current_time_str[32] = {0};
unused(current_time_str);
FILE *fileptr;
int64_t filelen;
fileptr=fopen(file,"rb");
if(!fileptr)
return REBRICK_ERR_BAD_ARGUMENT;
fseek(fileptr,0,SEEK_END);
filelen=ftell(fileptr);
rewind(fileptr);
char *temp=malloc(filelen+1);
if_is_null_then_die(temp,"malloc problem\n");
fill_zero(temp,filelen+1);
    size_t read_len = fread(temp, 1, filelen, fileptr);
    fclose(fileptr);
    if (read_len != (size_t)filelen)
    {
        free(temp);
        return REBRICK_ERR_BAD_ARGUMENT;
    }
*buffer=temp;
*len=filelen;
return REBRICK_SUCCESS;
}
int32_t rebrick_util_ip_equal(const rebrick_sockaddr_t *src,const rebrick_sockaddr_t *dst){
if(!src || !dst)
return 0;
if(src->base.sa_family==AF_INET)
return memcmp(&src->v4.sin_addr,&dst->v4.sin_addr,sizeof(struct in_addr))==0?1:0;
return memcmp(&src->v6.sin6_addr,&dst->v6.sin6_addr,sizeof(struct in6_addr))==0?1:0;
}
<file_sep>
#ifndef __REBRICK_SOCKET_H__
#define __REBRICK_SOCKET_H__
#include "../common/rebrick_common.h"
#include "../common/rebrick_log.h"
#include "../lib/utlist.h"
struct rebrick_socket;
/**
 * @brief after data is received, this function is called
 * @param socket which socket was used
 * @param callback_data this parameter is set when calling rebrick_xxxsocket_new(......,callback_data,.......)
 * @param addr from which addr the data came
 * @param buffer the received data
 * @param len buffer length
 */
typedef void (*rebrick_on_data_received_callback_t)(struct rebrick_socket *socket, void *callback_data, const struct sockaddr *addr, const char *buffer, ssize_t len);
/**
 * @brief after data is sent, this function is called
 * @param socket which socket was used
 * @param callback_data this parameter is set when calling rebrick_xxxsocket_new(......,callback_data,.......)
 * @param source this parameter is used for source detection
 */
typedef void (*rebrick_on_data_sended_callback_t)(struct rebrick_socket *socket, void *callback_data,void *source);
/**
 * @brief after an error occurs, this function is called
 * @param socket which socket was used
 * @param callback_data this parameter is set when calling rebrick_xxxsocket_new(......,callback_data,.......)
 * @param error result of the operation, 0 means SUCCESS, otherwise an error code
 */
typedef void (*rebrick_on_error_occured_callback_t)(struct rebrick_socket *socket, void *callback_data,int error);
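/*
 * A minimal sketch of implementing the data received callback above
 * (hypothetical names; the buffer is copied defensively on the assumption
 * that it may not outlive the callback):
 *
 *   static void on_data(struct rebrick_socket *socket, void *callback_data,
 *                       const struct sockaddr *addr, const char *buffer, ssize_t len)
 *   {
 *       unused(socket);
 *       unused(callback_data);
 *       unused(addr);
 *       char *copy = malloc(len + 1);
 *       memcpy(copy, buffer, len);
 *       copy[len] = 0;
 *       //...use copy...
 *       free(copy);
 *   }
 */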
//////////////// rebrick clean func //////////////////////
typedef void (*rebrick_clean_func_ptr_t)(void *ptr);
typedef struct rebrick_clean_func{
base_object();
//free function
public_ rebrick_clean_func_ptr_t func;
//ptr for free
public_ void *ptr;
//any data for you
union{
int32_t source;
void *ptr;
}anydata;
}rebrick_clean_func_t;
#define rebrick_clean_func_clone(x,y) \
rebrick_clean_func_t *newptr=new(rebrick_clean_func_t);\
constructor(newptr,rebrick_clean_func_t);\
newptr->func=(x)->func;\
newptr->ptr=(x)->ptr;\
(y)=newptr;
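/*
 * Sketch of filling in a clean func so that a heap buffer can be released
 * after an asynchronous send completes (an illustrative assumption about
 * intended use, not library sample code):
 *
 *   char *buf = malloc(1024);
 *   rebrick_clean_func_t clean = {0};
 *   clean.func = free; //invoked when the buffer is no longer needed
 *   clean.ptr = buf;
 *   //e.g. rebrick_httpsocket_send(socket, buf, 1024, clean);
 */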
////////////////////////// base socket //////////////////////////////
#define base_socket() \
base_object(); \
public_ readonly_ char bind_ip[REBRICK_IP_STR_LEN];\
public_ readonly_ char bind_port[REBRICK_PORT_STR_LEN];\
\
protected_ uv_loop_t *loop;\
protected_ union{\
uv_tcp_t tcp;\
uv_udp_t udp;\
}handle;\
public_ readonly_ rebrick_sockaddr_t bind_addr;\
protected_ rebrick_on_data_received_callback_t on_data_received;\
protected_ rebrick_on_data_sended_callback_t on_data_sended;\
protected_ rebrick_on_error_occured_callback_t on_error_occured;\
protected_ void *callback_data;
public_ typedef struct rebrick_socket{
base_socket();
}rebrick_socket_t;
#define cast_to_base_socket(x) cast((x),rebrick_socket_t*)
#endif<file_sep>#include "rebrick_tls.h"
//struct rebrick_tls_checkitem_list_t *tls_after_io_checklist = NULL;
//struct rebrick_tls_checkitem_list_t *tls_before_io_checklist = NULL;
//multi init protector
static int32_t tls_init_finished = 0;
static void after_io(uv_check_t *check)
{
char current_time_str[32] = {0};
unused(current_time_str);
if (check && check->data)
{
rebrick_tls_checkitem_list_t *checklist = cast(check->data, rebrick_tls_checkitem_list_t *);
rebrick_tls_checkitem_t *tmp;
DL_FOREACH(checklist->head, tmp)
{
tmp->func(tmp->socket);
}
}
}
int32_t rebrick_after_io_list_add(rebrick_tls_checkitem_func func,struct rebrick_tlssocket *socket)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_tls_checkitem_t *tmp;
int32_t founded = 0;
    //this would run faster if it were a hash table
DL_FOREACH(tls_after_io_checklist->head, tmp)
{
if (tmp->socket == socket)
{
founded = 1;
break;
}
}
if (!founded)
{
rebrick_tls_checkitem_t *item = new (rebrick_tls_checkitem_t);
constructor(item, rebrick_tls_checkitem_t);
item->socket = socket;
item->func=func;
DL_APPEND(tls_after_io_checklist->head, item);
}
return REBRICK_SUCCESS;
}
int32_t rebrick_after_io_list_remove(struct rebrick_tlssocket *socket)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_tls_checkitem_t *tmp, *el;
if(tls_after_io_checklist)
DL_FOREACH_SAFE(tls_after_io_checklist->head, el, tmp)
{
if (el->socket == socket){
DL_DELETE(tls_after_io_checklist->head, el);
free(el);
}
}
return REBRICK_SUCCESS;
}
int32_t rebrick_before_io_list_add(rebrick_tls_checkitem_func func,struct rebrick_tlssocket *socket)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_tls_checkitem_t *tmp;
int32_t founded = 0;
DL_FOREACH(tls_before_io_checklist->head, tmp)
{
if (tmp->socket == socket)
{
founded = 1;
break;
}
}
if (!founded)
{
rebrick_tls_checkitem_t *item = new (rebrick_tls_checkitem_t);
constructor(item, rebrick_tls_checkitem_t);
item->socket = socket;
item->func=func;
DL_APPEND(tls_before_io_checklist->head, item);
}
return REBRICK_SUCCESS;
}
int32_t rebrick_before_io_list_remove(struct rebrick_tlssocket *socket)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_tls_checkitem_t *tmp, *el;
if(tls_before_io_checklist)
DL_FOREACH_SAFE(tls_before_io_checklist->head, el, tmp)
{
if (el->socket == socket){
DL_DELETE(tls_before_io_checklist->head, el);
free(el);
}
}
return REBRICK_SUCCESS;
}
static uv_check_t check;
extern int32_t rebrick_tlssocket_change_context(struct rebrick_tlssocket *socket,const char *servername);
static int ssl_servername_cb(SSL *s, int *ad, void *arg)
{
unused(s);
unused(ad);
unused(arg);
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
rebrick_tls_context_t *context = cast(arg,rebrick_tls_context_t*);
if(!context){
rebrick_log_fatal("sni cannot find, context is null\n");
return SSL_TLSEXT_ERR_ALERT_FATAL;
}
rebrick_tls_ssl_t *el,*tmp;
DL_FOREACH_SAFE(context->sni_pending_list,el,tmp){
if(el->ssl==s){
const char *servername = SSL_get_servername(s, TLSEXT_NAMETYPE_host_name);
            if(servername && strlen(servername)){
struct rebrick_tlssocket *tlssocket=cast(el->ref,struct rebrick_tlssocket*);
result=rebrick_tlssocket_change_context(tlssocket,servername);
if(result<0)
return SSL_TLSEXT_ERR_ALERT_FATAL;
}
DL_DELETE(context->sni_pending_list,el);
break;
}
}
/* if (servername != NULL && p->biodebug != NULL) {
const char *cp = servername;
unsigned char uc;
BIO_printf(p->biodebug, "Hostname in TLS extension: \"");
while ((uc = *cp++) != 0)
BIO_printf(p->biodebug,
isascii(uc) && isprint(uc) ? "%c" : "\\x%02x", uc);
BIO_printf(p->biodebug, "\"\n");
}
if (p->servername == NULL)
return SSL_TLSEXT_ERR_NOACK;
if (servername != NULL) {
if (strcasecmp(servername, p->servername))
return p->extension_error;
if (ctx2 != NULL) {
BIO_printf(p->biodebug, "Switching server context.\n");
SSL_set_SSL_CTX(s, ctx2);
}
} */
return SSL_TLSEXT_ERR_OK;
}
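/*
 * SNI flow in this file: rebrick_tls_ssl_new2 parks a new ssl on the sni
 * context's pending list; when the ClientHello arrives, the callback above
 * finds that ssl, reads the requested servername and lets
 * rebrick_tlssocket_change_context swap in the real context for that name.
 */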
int32_t rebrick_tls_init()
{
if (!tls_init_finished)
{
char current_time_str[32] = {0};
int32_t result;
unused(current_time_str);
OPENSSL_init_ssl(0,NULL);
OpenSSL_add_all_digests();
SSL_load_error_strings();
ERR_load_crypto_strings();
//create a new context for SNI(server name indication)
rebrick_tls_context_t *context_sni;
result=rebrick_tls_context_new(&context_sni,REBRICK_TLS_CONTEXT_SNI,SSL_VERIFY_NONE, SSL_SESS_CACHE_BOTH, SSL_OP_ALL,0, REBRICK_TLS_SNI_FAKE_CERT_PRV_FILE, REBRICK_TLS_SNI_FAKE_CERT_PRV_FILE);
if(result<0){
return result;
}
SSL_CTX_set_tlsext_servername_callback(context_sni->tls_ctx, ssl_servername_cb);
SSL_CTX_set_tlsext_servername_arg(context_sni->tls_ctx,context_sni);
//after io part
tls_after_io_checklist=new(rebrick_tls_checkitem_list_t);
constructor(tls_after_io_checklist,rebrick_tls_checkitem_list_t);
tls_before_io_checklist=new(rebrick_tls_checkitem_list_t);
constructor(tls_before_io_checklist,rebrick_tls_checkitem_list_t);
uv_check_init(uv_default_loop(), &check);
check.data = tls_after_io_checklist;
uv_check_start(&check, after_io);
tls_init_finished = 1;
}
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_cleanup(){
if(tls_init_finished){
rebrick_tls_context_t *context_sni=NULL;
rebrick_tls_context_get(REBRICK_TLS_CONTEXT_SNI,&context_sni);
if(context_sni)
rebrick_tls_context_destroy(context_sni);
//OPENSSL_cleanup();
EVP_cleanup();
ENGINE_cleanup();
CONF_modules_unload(1);
EVP_cleanup();
CRYPTO_cleanup_all_ex_data();
//ERR_remove_state(uv_os_getpid());
ERR_free_strings();
if(tls_after_io_checklist)
free(tls_after_io_checklist);
if(tls_before_io_checklist)
free(tls_before_io_checklist);
tls_after_io_checklist=NULL;
tls_before_io_checklist=NULL;
uv_check_stop(&check);
uv_close(cast(&check,uv_handle_t*),NULL);
tls_init_finished=0;
}
return REBRICK_SUCCESS;
}
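/*
 * Lifecycle sketch (mirrors the test setup/teardown later in this repo):
 *
 * rebrick_tls_init();
 * //...create contexts and sockets, run uv_default_loop()...
 * rebrick_tls_cleanup();
 * uv_loop_close(uv_default_loop());
 */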
struct rebrick_tls_context_hashitem
{
base_object();
char key[REBRICK_TLS_KEY_LEN];
rebrick_tls_context_t *ctx;
UT_hash_handle hh;
};
struct rebrick_tls_context_hashitem *ctx_map = NULL;
int32_t rebrick_tls_context_new(rebrick_tls_context_t **context, const char *key, int32_t ssl_verify, int32_t session_mode, int32_t options,int32_t clearoptions, const char *certificate_file, const char *private_file)
{
char current_time_str[32] = {0};
unused(current_time_str);
struct rebrick_tls_context_hashitem *out;
//find in hash map
HASH_FIND_STR(ctx_map, key, out);
if (out)
{
*context = out->ctx;
return REBRICK_SUCCESS;
}
rebrick_tls_context_t *ctx = new (rebrick_tls_context_t);
constructor(ctx, rebrick_tls_context_t);
ctx->tls_ctx = SSL_CTX_new(TLS_method());
if (!ctx->tls_ctx)
{
rebrick_log_fatal("ssl init failed\n");
free(ctx);
return REBRICK_ERR_TLS_INIT;
}
//only load not fake files
//because every server context must have a cert_file path
//we will copy file name but we will not load cert files
//also private keys
if (certificate_file && strcmp(certificate_file,REBRICK_TLS_SNI_FAKE_CERT_PRV_FILE) && SSL_CTX_use_certificate_file(ctx->tls_ctx, certificate_file, SSL_FILETYPE_PEM) <= 0)
{
rebrick_log_fatal("ssl cerfiticate file %s loading failed\n", certificate_file);
ERR_print_errors_fp(stderr);
SSL_CTX_free(ctx->tls_ctx);
free(ctx);
return REBRICK_ERR_TLS_INIT;
}
if(certificate_file)
snprintf(ctx->cert_file,REBRICK_TLS_FILE_MAX_LEN,"%s",certificate_file);
if (private_file && strcmp(private_file,REBRICK_TLS_SNI_FAKE_CERT_PRV_FILE) && SSL_CTX_use_PrivateKey_file(ctx->tls_ctx, private_file, SSL_FILETYPE_PEM) <= 0)
{
rebrick_log_fatal("ssl private file %s loading failed\n", private_file);
ERR_print_errors_fp(stderr);
SSL_CTX_free(ctx->tls_ctx);
free(ctx);
return REBRICK_ERR_TLS_INIT;
}
if(private_file && strcmp(private_file,REBRICK_TLS_SNI_FAKE_CERT_PRV_FILE) && !SSL_CTX_check_private_key(ctx->tls_ctx)){
rebrick_log_fatal("ssl private file %s loading failed\n", private_file);
ERR_print_errors_fp(stderr);
SSL_CTX_free(ctx->tls_ctx);
free(ctx);
return REBRICK_ERR_TLS_INIT;
}
if(private_file)
snprintf(ctx->prv_file,REBRICK_TLS_FILE_MAX_LEN,"%s",private_file);
strncpy(ctx->key, key, REBRICK_TLS_KEY_LEN - 1);
SSL_CTX_set_verify(ctx->tls_ctx, ssl_verify, NULL);
SSL_CTX_set_options(ctx->tls_ctx, options);
SSL_CTX_set_session_cache_mode(ctx->tls_ctx, session_mode);
if(clearoptions)
SSL_CTX_clear_options(ctx->tls_ctx, clearoptions);
struct rebrick_tls_context_hashitem *hash;
hash = new (struct rebrick_tls_context_hashitem);
constructor(hash, struct rebrick_tls_context_hashitem);
hash->ctx = ctx;
strncpy(hash->key, ctx->key, REBRICK_TLS_KEY_LEN - 1);
HASH_ADD_STR(ctx_map, key, hash);
rebrick_log_debug("%s ssl context created\n", key);
*context = ctx;
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_context_destroy(rebrick_tls_context_t *context)
{
if (context)
{
//clear the sni pending list
rebrick_tls_ssl_t *el,*tmp;
DL_FOREACH_SAFE(context->sni_pending_list,el,tmp){
DL_DELETE(context->sni_pending_list,el);
}
if (context->tls_ctx)
{
//remove from hash map
struct rebrick_tls_context_hashitem *out;
HASH_FIND_STR(ctx_map, context->key, out);
if (out)
{
HASH_DEL(ctx_map, out);
free(out);
}
//then dispose ctx
SSL_CTX_free(context->tls_ctx);
}
free(context);
}
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_context_get(const char *key, rebrick_tls_context_t **context)
{
char current_time_str[32] = {0};
unused(current_time_str);
struct rebrick_tls_context_hashitem *out;
//find in hash map
HASH_FIND_STR(ctx_map, key, out);
if (out)
{
rebrick_log_debug("%s ssl context found\n", key);
*context = out->ctx;
}
else
{
rebrick_log_debug("%s ssl context not found\n", key);
*context = NULL;
}
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_ssl_new(rebrick_tls_ssl_t **ssl, const rebrick_tls_context_t *context)
{
char current_time_str[32] = {0};
unused(current_time_str);
if (!context || !context->tls_ctx)
{
return REBRICK_ERR_BAD_ARGUMENT;
}
SSL *tmp = SSL_new(context->tls_ctx);
    if (!tmp)
{
rebrick_log_fatal("new ssl with key %s failed\n", context->key);
ERR_print_errors_fp(stderr);
return REBRICK_ERR_TLS_NEW;
}
if (rebrick_tls_context_is_server(context))
SSL_set_accept_state(tmp);
else
SSL_set_connect_state(tmp);
BIO *read = BIO_new(BIO_s_mem());
if (!read)
{
rebrick_log_fatal("new bio read with key %s failed\n", context->key);
SSL_free(tmp);
return REBRICK_ERR_TLS_ERR;
}
BIO *write = BIO_new(BIO_s_mem());
if (!write)
{
rebrick_log_fatal("new bio write with key %s failed\n", context->key);
BIO_free(read);
SSL_free(tmp);
return REBRICK_ERR_TLS_ERR;
}
BIO_set_nbio(read, 1);
BIO_set_nbio(write, 1);
rebrick_tls_ssl_t *state = new (rebrick_tls_ssl_t);
constructor(state, rebrick_tls_ssl_t);
state->ssl = tmp;
state->read = read;
state->write = write;
SSL_set_bio(tmp, read, write);
*ssl = state;
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_ssl_new3(rebrick_tls_ssl_t **ssl, const rebrick_tls_context_t *context,const char *servername){
char current_time_str[32] = {0};
unused(current_time_str);
if(!servername){
rebrick_log_fatal("servername is null\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
int32_t result=rebrick_tls_ssl_new(ssl,context);
if(result<0)
return result;
SSL_set_tlsext_host_name((*ssl)->ssl,servername);
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_ssl_new2(rebrick_tls_ssl_t **ssl,const char *server_indication_name){
char current_time_str[32] = {0};
unused(current_time_str);
unused(server_indication_name);
rebrick_tls_context_t *context;
//get context for SNI
int32_t result=rebrick_tls_context_get(REBRICK_TLS_CONTEXT_SNI,&context);
if(result<0){
rebrick_log_fatal("sni context not found\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
rebrick_tls_ssl_t *state;
result=rebrick_tls_ssl_new(&state,context);
if(result<0)
return result;
//add to SNI_CONTEXT pending list
    //this is important
DL_APPEND(context->sni_pending_list,state);
*ssl = state;
return REBRICK_SUCCESS;
}
int32_t rebrick_tls_ssl_destroy(rebrick_tls_ssl_t *tls)
{
if (tls)
{
if (tls->ssl)
{
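            //SSL_set_bio transferred ownership of the read/write BIOs to the
            //SSL object, so SSL_free releases them as well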
SSL_clear(tls->ssl);
SSL_free(tls->ssl);
}
free(tls);
}
return REBRICK_SUCCESS;
}<file_sep>#include "./http/rebrick_httpsocket.h"
#include "cmocka.h"
#include <unistd.h>
#define loop(var,a,x) \
var=a; \
while (var-- && (x)){ usleep(100); uv_run(uv_default_loop(), UV_RUN_NOWAIT);}
static rebrick_tls_context_t *context_verify_none = NULL;
static int setup(void**state){
unused(state);
rebrick_tls_init();
rebrick_tls_context_new(&context_verify_none, "client", SSL_VERIFY_NONE, SSL_SESS_CACHE_BOTH, SSL_OP_ALL,0, NULL, NULL);
fprintf(stdout,"**** %s ****\n",__FILE__);
return 0;
}
static int teardown(void **state){
unused(state);
int32_t loop_counter;
rebrick_tls_context_destroy(context_verify_none);
context_verify_none = NULL;
rebrick_tls_cleanup();
loop(loop_counter,100,TRUE);
uv_loop_close(uv_default_loop());
return 0;
}
static void on_error_occured_callback(rebrick_socket_t *socket,void *callback,int error){
unused(socket);
unused(callback);
unused(error);
rebrick_tlssocket_destroy(cast(socket, rebrick_tlssocket_t *));
}
static int32_t is_connected = FALSE;
static void on_connection_accepted_callback(rebrick_socket_t *socket, void *callback_data, const struct sockaddr *addr, void *client_handle)
{
is_connected = TRUE;
unused(callback_data);
unused(addr);
unused(client_handle);
unused(socket);
}
static int32_t is_connection_closed = 0;
static void on_connection_closed_callback(rebrick_socket_t *socket, void *callback_data)
{
unused(callback_data);
unused(socket);
is_connection_closed = 1;
}
static int32_t is_datareaded = FALSE;
static int32_t totalreaded_len = 0;
static char readedbuffer[131072] = {0};
static void on_data_read_callback(rebrick_socket_t *socket, void *callback_data, const struct sockaddr *addr, const char *buffer, ssize_t len)
{
unused(addr);
unused(socket);
unused(buffer);
unused(len);
unused(callback_data);
is_datareaded = TRUE;
fill_zero(readedbuffer, sizeof(readedbuffer));
memcpy(readedbuffer, buffer, len);
totalreaded_len += len;
}
static int32_t sended=FALSE;
static void on_data_send(rebrick_socket_t *socket,void *callback,void *source){
unused(socket);
unused(callback);
unused(source);
sended=TRUE;
}
static int32_t header_received=FALSE;
static void on_http_header_received(rebrick_socket_t *socket,void *callback_data,rebrick_http_header_t *header){
unused(socket);
unused(callback_data);
unused(header);
header_received=TRUE;
}
static int32_t is_bodyreaded = FALSE;
static int32_t totalreadedbody_len = 0;
static char readedbufferbody[131072] = {0};
static void on_body_read_callback(rebrick_socket_t *socket, void *callback_data, const struct sockaddr *addr, const char *buffer, ssize_t len)
{
unused(addr);
unused(socket);
unused(buffer);
unused(len);
unused(callback_data);
is_bodyreaded = TRUE;
fill_zero(readedbufferbody, sizeof(readedbufferbody));
memcpy(readedbufferbody, buffer, len);
totalreadedbody_len += len;
}
void deletesendata(void *ptr){
if(ptr){
rebrick_buffer_t *buffer=cast(ptr,rebrick_buffer_t *);
rebrick_buffer_destroy(buffer);
}
}
static void http_socket_as_client_create_get(void **start){
unused(start);
int32_t result;
int32_t counter;
rebrick_sockaddr_t destination;
rebrick_util_ip_port_to_addr("127.0.0.1", "9090", &destination);
rebrick_httpsocket_t *socket;
is_connected=FALSE;
result = rebrick_httpsocket_new(&socket, NULL, destination, NULL,
on_connection_accepted_callback,
on_connection_closed_callback,
on_data_read_callback, on_data_send,on_error_occured_callback,0,on_http_header_received,on_body_read_callback);
assert_int_equal(result, 0);
loop(counter,1000,!is_connected);
assert_int_equal(is_connected,TRUE);
rebrick_http_header_t *header;
result=rebrick_http_header_new(&header,"GET", "/api/get",1,1);
assert_int_equal(result,REBRICK_SUCCESS);
rebrick_buffer_t *buffer;
result=rebrick_http_header_to_buffer(header,&buffer);
assert_int_equal(result,REBRICK_SUCCESS);
assert_non_null(buffer);
sended=FALSE;
header_received=FALSE;
is_bodyreaded=FALSE;
rebrick_clean_func_t cleanfunc;
cleanfunc.func=deletesendata;
cleanfunc.ptr=buffer;
//send data
result=rebrick_httpsocket_send(socket,cast(buffer->buf,char*),buffer->len,cleanfunc);
assert_int_equal(result,REBRICK_SUCCESS);
loop(counter,1000,(!sended));
assert_int_equal(sended,TRUE);
loop(counter,100,!header_received);
assert_int_equal(header_received,TRUE);
loop(counter,100,!is_bodyreaded);
assert_int_equal(is_bodyreaded,TRUE);
assert_non_null(socket->header);
assert_int_equal(socket->header->major_version,1);
assert_int_equal(socket->header->minor_version,1);
assert_int_equal(socket->header->is_request,FALSE);
assert_string_equal(socket->header->path,"");
assert_string_equal(socket->header->method,"");
assert_string_equal(socket->header->status_code_str,"OK");
assert_int_equal(socket->header->status_code,200);
const char *value;
rebrick_http_header_get_header(socket->header,"X-Powered-By",&value);
assert_string_equal(value,"Express");
rebrick_http_header_get_header(socket->header,"Content-Type",&value);
assert_string_equal(value,"text/html; charset=utf-8");
rebrick_http_header_get_header(socket->header,"Content-Length",&value);
assert_string_equal(value,"25");
/*rebrick_http_header_get_header(socket->header,"ETag",&value);
assert_string_equal(value,"W/\"19-EE0dTSKO8nU0PWVui0tLx8f6m9I\"");
rebrick_http_header_get_header(socket->header,"Date",&value);
assert_string_equal(value,"Sun, 22 Sep 2019 20:14:00 GMT");*/
rebrick_http_header_get_header(socket->header,"Connection",&value);
assert_string_equal(value,"keep-alive");
assert_string_equal(readedbufferbody,"get captured successfully");
rebrick_http_header_destroy(header);
assert_int_equal(socket->content_received_length,25);
rebrick_httpsocket_reset(socket);
assert_int_equal(socket->content_received_length,0);
assert_null(socket->header);
assert_int_equal(socket->header_len,0);
assert_int_equal(socket->is_header_parsed,0);
assert_null(socket->tmp_buffer);
rebrick_httpsocket_destroy(socket);
loop(counter,100,TRUE);
}
static void http_socket_as_client_create_post(void **start){
unused(start);
int32_t result;
int32_t counter=0;
rebrick_sockaddr_t destination;
rebrick_util_ip_port_to_addr("127.0.0.1", "9090", &destination);
rebrick_httpsocket_t *socket;
is_connected=FALSE;
result = rebrick_httpsocket_new(&socket, NULL, destination, NULL,
on_connection_accepted_callback,
on_connection_closed_callback,
on_data_read_callback, on_data_send,on_error_occured_callback,0,on_http_header_received,on_body_read_callback);
assert_int_equal(result, REBRICK_SUCCESS);
loop(counter,1000,!is_connected);
assert_int_equal(is_connected,TRUE);
char temp[1024];
//body buffer
const char *body="{\"hello\":\"world\"}";
rebrick_buffer_t *bodybuffer;
result=rebrick_buffer_new(&bodybuffer,cast_to_uint8ptr(body),strlen(body),64);
assert_int_equal(result,REBRICK_SUCCESS);
rebrick_http_header_t *header;
result=rebrick_http_header_new(&header,"POST", "/api/post",1,1);
assert_int_equal(result,REBRICK_SUCCESS);
rebrick_http_header_add_header(header,"content-type","application/json");
sprintf(temp,"%ld",bodybuffer->len);
rebrick_http_header_add_header(header,"content-length",temp);
//header buffer
rebrick_buffer_t *buffer;
result=rebrick_http_header_to_buffer(header,&buffer);
assert_int_equal(result,REBRICK_SUCCESS);
assert_non_null(buffer);
sended=FALSE;
header_received=FALSE;
is_bodyreaded=FALSE;
rebrick_clean_func_t cleanfunc;
cleanfunc.func=deletesendata;
cleanfunc.ptr=buffer;
result=rebrick_httpsocket_send(socket,cast(buffer->buf,char*),buffer->len,cleanfunc);
assert_int_equal(result,REBRICK_SUCCESS);
loop(counter,1000,(!sended));
assert_int_equal(sended,TRUE);
sended=FALSE;
rebrick_clean_func_t cleanfunc2;
cleanfunc2.func=deletesendata;
cleanfunc2.ptr=bodybuffer;
result=rebrick_httpsocket_send(socket,cast(bodybuffer->buf,char*),bodybuffer->len,cleanfunc2);
loop(counter,1000,(!sended));
assert_int_equal(sended,TRUE);
loop(counter,100,!header_received);
assert_int_equal(header_received,TRUE);
loop(counter,100,!is_bodyreaded);
assert_int_equal(is_bodyreaded,TRUE);
assert_non_null(socket->header);
assert_int_equal(socket->header->major_version,1);
assert_int_equal(socket->header->minor_version,1);
assert_int_equal(socket->header->is_request,FALSE);
assert_string_equal(socket->header->path,"");
assert_string_equal(socket->header->method,"");
assert_string_equal(socket->header->status_code_str,"OK");
assert_int_equal(socket->header->status_code,200);
const char *value;
rebrick_http_header_get_header(socket->header,"X-Powered-By",&value);
assert_string_equal(value,"Express");
rebrick_http_header_get_header(socket->header,"Content-Type",&value);
assert_string_equal(value,"application/json; charset=utf-8");
rebrick_http_header_get_header(socket->header,"Content-Length",&value);
assert_int_equal(atoi(value),strlen(body));
rebrick_http_header_get_header(socket->header,"Connection",&value);
assert_string_equal(value,"keep-alive");
assert_string_equal(readedbufferbody,body);
rebrick_http_header_destroy(header);
assert_int_equal(socket->content_received_length,strlen(body));
rebrick_httpsocket_reset(socket);
assert_int_equal(socket->content_received_length,0);
assert_null(socket->header);
assert_int_equal(socket->header_len,0);
assert_int_equal(socket->is_header_parsed,0);
assert_null(socket->tmp_buffer);
rebrick_httpsocket_destroy(socket);
loop(counter,100,TRUE);
}
static void http_socket_as_client_create_with_tls_post(void **start){
unused(start);
int32_t result;
int32_t counter=0;
rebrick_sockaddr_t destination;
rebrick_util_ip_port_to_addr("127.0.0.1", "3000", &destination);
rebrick_httpsocket_t *socket;
is_connected=FALSE;
result = rebrick_httpsocket_new(&socket, NULL, destination, NULL,
on_connection_accepted_callback,
on_connection_closed_callback,
on_data_read_callback, on_data_send,on_error_occured_callback,0,on_http_header_received,on_body_read_callback);
assert_int_equal(result, REBRICK_SUCCESS);
loop(counter,1000,!is_connected);
assert_int_equal(is_connected,TRUE);
char temp[1024];
//body buffer
const char *body="{\"hello\":\"world\"}";
rebrick_buffer_t *bodybuffer;
result=rebrick_buffer_new(&bodybuffer,cast_to_uint8ptr(body),strlen(body),64);
assert_int_equal(result,REBRICK_SUCCESS);
rebrick_http_header_t *header;
result=rebrick_http_header_new(&header,"POST", "/api/post",1,1);
assert_int_equal(result,REBRICK_SUCCESS);
rebrick_http_header_add_header(header,"content-type","application/json");
sprintf(temp,"%ld",bodybuffer->len);
rebrick_http_header_add_header(header,"content-length",temp);
//header buffer
rebrick_buffer_t *buffer;
result=rebrick_http_header_to_buffer(header,&buffer);
assert_int_equal(result,REBRICK_SUCCESS);
assert_non_null(buffer);
sended=FALSE;
header_received=FALSE;
is_bodyreaded=FALSE;
rebrick_clean_func_t cleanfunc;
cleanfunc.func=deletesendata;
cleanfunc.ptr=buffer;
result=rebrick_httpsocket_send(socket,cast(buffer->buf,char*),buffer->len,cleanfunc);
assert_int_equal(result,REBRICK_SUCCESS);
loop(counter,1000,(!sended));
assert_int_equal(sended,TRUE);
sended=FALSE;
rebrick_clean_func_t cleanfunc2;
cleanfunc2.func=deletesendata;
cleanfunc2.ptr=bodybuffer;
result=rebrick_httpsocket_send(socket,cast(bodybuffer->buf,char*),bodybuffer->len,cleanfunc2);
loop(counter,1000,(!sended));
assert_int_equal(sended,TRUE);
loop(counter,100,!header_received);
assert_int_equal(header_received,TRUE);
loop(counter,100,!is_bodyreaded);
assert_int_equal(is_bodyreaded,TRUE);
assert_non_null(socket->header);
assert_int_equal(socket->header->major_version,1);
assert_int_equal(socket->header->minor_version,1);
assert_int_equal(socket->header->is_request,FALSE);
assert_string_equal(socket->header->path,"");
assert_string_equal(socket->header->method,"");
assert_string_equal(socket->header->status_code_str,"OK");
assert_int_equal(socket->header->status_code,200);
const char *value;
rebrick_http_header_get_header(socket->header,"X-Powered-By",&value);
assert_string_equal(value,"Express");
rebrick_http_header_get_header(socket->header,"Content-Type",&value);
assert_string_equal(value,"application/json; charset=utf-8");
rebrick_http_header_get_header(socket->header,"Content-Length",&value);
assert_int_equal(atoi(value),strlen(body));
rebrick_http_header_get_header(socket->header,"Connection",&value);
assert_string_equal(value,"keep-alive");
assert_string_equal(readedbufferbody,body);
rebrick_http_header_destroy(header);
assert_int_equal(socket->content_received_length,strlen(body));
rebrick_httpsocket_reset(socket);
assert_int_equal(socket->content_received_length,0);
assert_null(socket->header);
assert_int_equal(socket->header_len,0);
assert_int_equal(socket->is_header_parsed,0);
assert_null(socket->tmp_buffer);
rebrick_httpsocket_destroy(socket);
loop(counter,100,TRUE);
}
int test_rebrick_httpsocket(void) {
const struct CMUnitTest tests[] = {
cmocka_unit_test(http_socket_as_client_create_get),
cmocka_unit_test(http_socket_as_client_create_post)
};
return cmocka_run_group_tests(tests, setup, teardown);
}
<file_sep>#include "rebrick_http.h"
int32_t rebrick_http_key_value_new(rebrick_http_key_value_t **keyvalue, const char *key, const char *value)
{
char current_time_str[32] = {0};
unused(current_time_str);
size_t keylen = 0;
if (key)
keylen = strlen(key);
size_t valuelen = 0;
if (value)
valuelen = strlen(value);
rebrick_http_key_value_t *tmp = malloc(sizeof(rebrick_http_key_value_t));
if_is_null_then_die(tmp, "malloc problem\n");
memset(tmp, 0, sizeof(rebrick_http_key_value_t));
tmp->key = malloc(keylen + 1);
if_is_null_then_die(tmp->key, "malloc problem\n");
memset(tmp->key, 0, keylen + 1);
if (key)
memcpy(tmp->key, key, keylen);
tmp->key_lower=malloc(keylen+1);
if_is_null_then_die(tmp->key_lower,"malloc problem\n");
memset(tmp->key_lower,0,keylen+1);
if(key){
size_t index=0;
while(index<keylen){
*(tmp->key_lower+index)=tolower(*((char*)key+index));
index++;
}
}
tmp->value = malloc(valuelen + 1);
if_is_null_then_die(tmp->value, "malloc problem\n");
memset(tmp->value, 0, valuelen + 1);
if (value)
memcpy(tmp->value, value, valuelen);
tmp->keylen = keylen;
tmp->valuelen = valuelen;
*keyvalue = tmp;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_key_value_new2(rebrick_http_key_value_t **keyvalue, const void *key, size_t keylen, const void *value, size_t valuelen)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_http_key_value_t *tmp = malloc(sizeof(rebrick_http_key_value_t));
if_is_null_then_die(tmp, "malloc problem\n");
memset(tmp, 0, sizeof(rebrick_http_key_value_t));
tmp->key = malloc(keylen+1);
if_is_null_then_die(tmp->key, "malloc problem\n");
memset(tmp->key, 0, keylen+1);
if (key)
memcpy(tmp->key, key, keylen);
tmp->key_lower=malloc(keylen+1);
if_is_null_then_die(tmp->key_lower,"malloc problem\n");
memset(tmp->key_lower,0,keylen+1);
if(key){
size_t index=0;
while(index<keylen){
*(tmp->key_lower+index)=tolower(*((char*)key+index));
index++;
}
}
tmp->value = malloc(valuelen + 1);
if_is_null_then_die(tmp->value, "malloc problem\n");
memset(tmp->value, 0, valuelen + 1);
if (value)
memcpy(tmp->value, value, valuelen);
tmp->keylen = keylen;
tmp->valuelen = valuelen;
*keyvalue = tmp;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_key_value_destroy(rebrick_http_key_value_t *keyvalue)
{
if (keyvalue)
{
if (keyvalue->key)
free(keyvalue->key);
if(keyvalue->key_lower)
free(keyvalue->key_lower);
if (keyvalue->value)
free(keyvalue->value);
free(keyvalue);
}
return REBRICK_SUCCESS;
}
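/*
 * Example (mirrors the unit tests later in this repo); keys are also stored
 * lowercased so header lookups are case-insensitive:
 *
 * rebrick_http_key_value_t *kv;
 * rebrick_http_key_value_new(&kv, "Content-Type", "application/json");
 * //kv->key is "Content-Type", kv->key_lower is "content-type"
 * rebrick_http_key_value_destroy(kv);
 */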
int32_t rebrick_http_header_new(rebrick_http_header_t **header, const char *method, const char *path, int8_t major, int8_t minor)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_http_header_t *tmp = new (rebrick_http_header_t);
constructor(tmp, rebrick_http_header_t);
if (path)
{
size_t path_len = strlen(path);
if (path_len > REBRICK_HTTP_MAX_PATH_LEN - 1)
return REBRICK_ERR_BAD_ARGUMENT;
strcpy(tmp->path, path);
}
if (method)
{
size_t method_len = strlen(method);
if (method_len > REBRICK_HTTP_MAX_METHOD_LEN - 1)
return REBRICK_ERR_BAD_ARGUMENT;
strcpy(tmp->method, method);
}
tmp->is_request=TRUE;
tmp->major_version = major;
tmp->minor_version = minor;
*header = tmp;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_new2(rebrick_http_header_t **header,const void *method,size_t method_len,const void *path,size_t path_len,int8_t major,int8_t minor){
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_http_header_t *tmp = new (rebrick_http_header_t);
constructor(tmp, rebrick_http_header_t);
if (path)
{
if (path_len > REBRICK_HTTP_MAX_PATH_LEN - 1)
return REBRICK_ERR_BAD_ARGUMENT;
memcpy(tmp->path, path,path_len);
}
if (method)
{
if (method_len > REBRICK_HTTP_MAX_METHOD_LEN - 1)
return REBRICK_ERR_BAD_ARGUMENT;
memcpy(tmp->method, method,method_len);
}
tmp->is_request=TRUE;
tmp->major_version = major;
tmp->minor_version = minor;
*header = tmp;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_new3(rebrick_http_header_t **header,int32_t status,const char *status_code,int8_t major,int8_t minor){
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_http_header_t *tmp = new (rebrick_http_header_t);
constructor(tmp, rebrick_http_header_t);
if (status_code)
{
size_t len=strlen(status_code);
if (len > REBRICK_HTTP_MAX_STATUSCODE_LEN - 1)
return REBRICK_ERR_BAD_ARGUMENT;
memcpy(tmp->status_code_str, status_code,len);
}
tmp->status_code=status;
tmp->is_request=FALSE;
tmp->major_version = major;
tmp->minor_version = minor;
*header = tmp;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_new4(rebrick_http_header_t **header,int32_t status,const void *status_code,size_t status_code_len,int8_t major,int8_t minor){
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_http_header_t *tmp = new (rebrick_http_header_t);
constructor(tmp, rebrick_http_header_t);
if (status_code)
{
size_t len=status_code_len;
if (len > REBRICK_HTTP_MAX_STATUSCODE_LEN - 1)
return REBRICK_ERR_BAD_ARGUMENT;
memcpy(tmp->status_code_str, status_code,len);
}
tmp->status_code=status;
tmp->is_request=FALSE;
tmp->major_version = major;
tmp->minor_version = minor;
*header = tmp;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_add_header(rebrick_http_header_t *header, const char *key, const char *value)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
if (!header || !key || !value)
return REBRICK_ERR_BAD_ARGUMENT;
rebrick_http_key_value_t *keyvalue;
result = rebrick_http_key_value_new(&keyvalue, key, value);
if (result)
return result;
HASH_ADD_STR(header->headers, key_lower, keyvalue);
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_add_header2(rebrick_http_header_t *header, const char *key,size_t keylen, const char *value,size_t valuelen)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
if (!header || !key || !value)
return REBRICK_ERR_BAD_ARGUMENT;
rebrick_http_key_value_t *keyvalue;
result = rebrick_http_key_value_new2(&keyvalue, key,keylen,value,valuelen);
if (result)
return result;
HASH_ADD_STR(header->headers, key_lower, keyvalue);
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_contains_key(rebrick_http_header_t *header, const char *key, int32_t *founded)
{
if (!header || !key)
return REBRICK_ERR_BAD_ARGUMENT;
//to lower
char keylower[REBRICK_HTTP_MAX_HEADER_KEY_LEN]={0};
strncpy(keylower,key,REBRICK_HTTP_MAX_HEADER_KEY_LEN-1);
string_to_lower(keylower);
rebrick_http_key_value_t *keyvalue;
HASH_FIND_STR(header->headers, keylower, keyvalue);
*founded = FALSE;
if (keyvalue)
*founded = TRUE;
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_get_header(rebrick_http_header_t *header,const char *key,const char **value){
if (!header || !key)
return REBRICK_ERR_BAD_ARGUMENT;
rebrick_http_key_value_t *keyvalue;
char keylower[REBRICK_HTTP_MAX_HEADER_KEY_LEN]={0};
strncpy(keylower,key,REBRICK_HTTP_MAX_HEADER_KEY_LEN-1);
string_to_lower(keylower);
HASH_FIND_STR(header->headers, keylower, keyvalue);
*value=NULL;
if (keyvalue){
*value=keyvalue->value;
}
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_remove_key(rebrick_http_header_t *header, const char *key)
{
if (!header || !key)
return REBRICK_ERR_BAD_ARGUMENT;
char keylower[REBRICK_HTTP_MAX_HEADER_KEY_LEN]={0};
strncpy(keylower,key,REBRICK_HTTP_MAX_HEADER_KEY_LEN-1);
string_to_lower(keylower);
rebrick_http_key_value_t *keyvalue;
HASH_FIND_STR(header->headers, keylower, keyvalue);
if (keyvalue)
{
HASH_DEL(header->headers, keyvalue);
rebrick_http_key_value_destroy(keyvalue);
}
return REBRICK_SUCCESS;
}
int32_t rebrick_http_header_destroy(rebrick_http_header_t *header)
{
char current_time_str[32] = {0};
unused(current_time_str);
if (header)
{
rebrick_http_key_value_t *s, *tmp;
HASH_ITER(hh, header->headers, s, tmp)
{
HASH_DEL(header->headers, s);
rebrick_http_key_value_destroy(s);
}
free(header);
}
return REBRICK_SUCCESS;
}
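/*
 * Building and serializing a request header, as exercised in the tests:
 *
 * rebrick_http_header_t *header;
 * rebrick_http_header_new(&header, "POST", "/api/metrics", 1, 1);
 * rebrick_http_header_add_header(header, "content-type", "application/json");
 * rebrick_buffer_t *buffer;
 * rebrick_http_header_to_buffer(header, &buffer);
 * //buffer->buf: "POST /api/metrics HTTP/1.1\r\ncontent-type:application/json\r\n\r\n"
 * rebrick_buffer_destroy(buffer);
 * rebrick_http_header_destroy(header);
 */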
int32_t rebrick_http_header_to_buffer(rebrick_http_header_t *header, rebrick_buffer_t **rbuffer)
{
char current_time_str[32] = {0};
unused(current_time_str);
if (!header)
return REBRICK_ERR_BAD_ARGUMENT;
char buffer[REBRICK_HTTP_MAX_HEADER_LEN];
int32_t written_chars_count = 0;
if (header->path[0])
written_chars_count=snprintf(buffer,REBRICK_HTTP_MAX_HEADER_LEN,"%s %s HTTP/%d.%d\r\n",(header->method?header->method:"GET"),header->path,header->major_version,header->minor_version);
else
written_chars_count = snprintf(buffer, REBRICK_HTTP_MAX_HEADER_LEN, "HTTP/%d.%d %d %s\r\n", header->major_version, header->minor_version, header->status_code, header->status_code_str);
    if (written_chars_count >= REBRICK_HTTP_MAX_HEADER_LEN - 1)
{
rebrick_log_error("max http header len\n");
return REBRICK_ERR_LEN_NOT_ENOUGH;
}
rebrick_http_key_value_t *s, *tmp;
HASH_ITER(hh, header->headers, s, tmp)
{
written_chars_count += snprintf(buffer + written_chars_count, REBRICK_HTTP_MAX_HEADER_LEN - written_chars_count, "%s:%s\r\n", s->key, s->value);
        if (written_chars_count >= REBRICK_HTTP_MAX_HEADER_LEN - 1)
{
rebrick_log_error("max http header len\n");
return REBRICK_ERR_LEN_NOT_ENOUGH;
}
}
written_chars_count += snprintf(buffer + written_chars_count, REBRICK_HTTP_MAX_HEADER_LEN - written_chars_count, "\r\n");
    if (written_chars_count >= REBRICK_HTTP_MAX_HEADER_LEN - 1)
{
rebrick_log_error("max http header len\n");
return REBRICK_ERR_LEN_NOT_ENOUGH;
}
rebrick_buffer_t *rtmp;
int32_t result = rebrick_buffer_new(&rtmp, cast(buffer,uint8_t*), written_chars_count, REBRICK_HTTP_MAX_HEADER_LEN);
if (result < 0)
return result;
*rbuffer = rtmp;
return REBRICK_SUCCESS;
}<file_sep>#ifndef __REBRICK_CONFIG_H__
#define __REBRICK_CONFIG_H__
#include "rebrick_common.h"
#include "rebrick_log.h"
enum rebrick_listen_family{
REBRICK_IPV4=0,
REBRICK_IPV6=1,
REBRICK_IPV4_IPV6=2
};
public_ typedef struct rebrick_config
{
    /* if a memory leak occurs we take a memory dump and inspect it; every struct has this same base property */
base_object();
/*server listen port*/
public_ readonly_ int32_t listen_port;
/*server listen family */
public_ readonly_ int32_t listen_family;
} rebrick_config_t;
/**
 * @brief Create a rebrick config object
 *
 * @return int32_t <0 means error, @see REBRICK_SUCCESS
 */
int32_t rebrick_config_new(rebrick_config_t **config);
/**
* @brief destroys a config object
*
* @param config object
*/
void rebrick_config_destroy(rebrick_config_t *config);
#endif
<file_sep>#include "rebrick_metrics.h"
int32_t rebrick_metrics_new(rebrick_metrics_t **metrics)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_metrics_t *tmp;
tmp = new (rebrick_metrics_t);
constructor(tmp, rebrick_metrics_t);
tmp->start_time = rebrick_util_micro_time();
*metrics = tmp;
return REBRICK_SUCCESS;
}
void rebrick_metrics_destroy(rebrick_metrics_t *metrics)
{
if (metrics)
{
free(metrics);
}
}
int32_t rebrick_metrics_tostring(const rebrick_metrics_t *metrics, char buffer[REBRICK_METRICS_MAX_STR_LEN])
{
fill_zero(buffer, REBRICK_METRICS_MAX_STR_LEN);
int32_t result = snprintf(buffer, REBRICK_METRICS_MAX_STR_LEN, "start_time:%" PRId64 "\n\
current_time:%" PRId64 "\n\
received_total:%" PRId64 "\n\
received_error_total:%" PRId64 "\n\
received_success_total:%" PRId64 "\n\
forward_total:%" PRId64 "\n\
forward_error_total:%" PRId64 "\n\
forward_success_total:%" PRId64 "\n",
metrics->start_time, metrics->current_time, metrics->received_total, metrics->received_error_total, metrics->received_success_total,
metrics->forward_total, metrics->forward_error_total, metrics->forward_success_total);
if (result < 0)
return REBRICK_ERR_SPRINTF;
return result;
}<file_sep>#include "rebrick_config.h"
int32_t rebrick_config_new(rebrick_config_t **config)
{
char current_time_str[32] = {0};
rebrick_config_t *tmp = new(rebrick_config_t);
constructor(tmp,rebrick_config_t);
char *port = getenv("LISTEN_PORT");
rebrick_log_info("environment variable LISTEN_PORT is: %s\n", port ? port : "null");
tmp->listen_port = 53;
if (port)
tmp->listen_port = atoi(port);
tmp->listen_family = REBRICK_IPV4;
char *listen_family = getenv("LISTEN_FAMILY");
rebrick_log_info("environment variable LISTEN_FAMILY is: %s\n", listen_family ? listen_family : "null");
if (listen_family)
{
if (strcmp(listen_family, "IPV4") == 0)
tmp->listen_family = REBRICK_IPV4;
if (strcmp(listen_family, "IPV6") == 0)
tmp->listen_family = REBRICK_IPV6;
if (strcmp(listen_family, "IPV4_IPV6") == 0)
tmp->listen_family = REBRICK_IPV4_IPV6;
}
*config = tmp;
return REBRICK_SUCCESS;
}
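/*
 * Example: configuration is read from environment variables, e.g.
 *
 *   LISTEN_PORT=5353 LISTEN_FAMILY=IPV4_IPV6 ./app
 *
 * rebrick_config_t *config;
 * rebrick_config_new(&config);   //listen_port=5353, listen_family=REBRICK_IPV4_IPV6
 * //...
 * rebrick_config_destroy(config);
 */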
void rebrick_config_destroy(rebrick_config_t *config)
{
if (config){
free(config);
}
}
<file_sep>#ifndef __REBRICK_METRICS_H__
#define __REBRICK_METRICS_H__
#include "rebrick_common.h"
#include "rebrick_log.h"
#include "rebrick_util.h"
public_ typedef struct rebrick_metrics
{
base_object();
public_ int64_t start_time;
public_ int64_t current_time;
public_ int64_t received_total;
public_ int64_t received_error_total;
public_ int64_t received_success_total;
public_ int64_t forward_total;
public_ int64_t forward_error_total;
public_ int64_t forward_success_total;
/* data */
} rebrick_metrics_t;
/**
 * @brief Create a rebrick metrics object
*
* @param metrics input pointer for creation
* @return int32_t <0 means error, @see REBRICK_SUCCESS
*/
int32_t rebrick_metrics_new(rebrick_metrics_t **metrics);
/**
 * @brief destroys a rebrick metrics object
*
*/
void rebrick_metrics_destroy(rebrick_metrics_t *metrics);
/**
* @brief max string buffer
*
*/
#define REBRICK_METRICS_MAX_STR_LEN 512
/**
* @brief writes metric object as string
*
* @param metrics
* @param buffer
* @return int32_t <0 means error, >0 strlen of string
*/
int32_t rebrick_metrics_tostring(const rebrick_metrics_t *metrics, char buffer[REBRICK_METRICS_MAX_STR_LEN]);
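/*
 * Usage sketch (only names from this header):
 *
 * rebrick_metrics_t *metrics;
 * rebrick_metrics_new(&metrics);
 * char buf[REBRICK_METRICS_MAX_STR_LEN];
 * int32_t len = rebrick_metrics_tostring(metrics, buf);
 * if (len > 0)
 *     printf("%s", buf);
 * rebrick_metrics_destroy(metrics);
 */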
#endif<file_sep>#include "rebrick_context.h"
int32_t rebrick_context_new(rebrick_context_t **context, rebrick_config_t *config, rebrick_metrics_t *metrics)
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_context_t *tmp = new (rebrick_context_t);
constructor(tmp, rebrick_context_t);
tmp->config = config;
tmp->metrics = metrics;
*context = tmp;
return REBRICK_SUCCESS;
}
void rebrick_context_destroy(rebrick_context_t *context)
{
if (context)
free(context);
}
<file_sep>#include "./http/rebrick_httpsocket.h"
#include "cmocka.h"
#include <unistd.h>
static int setup(void**state){
unused(state);
rebrick_tls_init();
fprintf(stdout,"**** %s ****\n",__FILE__);
return 0;
}
static int teardown(void **state){
unused(state);
rebrick_tls_cleanup();
int32_t counter = 100;
while (counter--)
{
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
usleep(1000);
}
uv_loop_close(uv_default_loop());
return 0;
}
static void rebrick_http_keyvalue_test(void **state){
unused(state);
int32_t result;
rebrick_http_key_value_t *keyvalue;
result=rebrick_http_key_value_new(&keyvalue,"hamza","kilic");
assert_int_equal(result,REBRICK_SUCCESS);
assert_memory_equal(keyvalue->key,"hamza",5);
assert_memory_equal(keyvalue->value,"kilic",5);
assert_int_equal(keyvalue->keylen,5);
assert_int_equal(keyvalue->valuelen,5);
rebrick_http_key_value_destroy(keyvalue);
}
static void rebrick_http_keyvalue_test2(void **state){
unused(state);
int32_t result;
rebrick_http_key_value_t *keyvalue;
result=rebrick_http_key_value_new2(&keyvalue,"hamza",5,"kilic",5);
assert_int_equal(result,REBRICK_SUCCESS);
assert_memory_equal(keyvalue->key,"hamza",6);
assert_memory_equal(keyvalue->value,"kilic",6);
assert_int_equal(keyvalue->keylen,5);
assert_int_equal(keyvalue->valuelen,5);
rebrick_http_key_value_destroy(keyvalue);
}
static void rebrick_http_header_test(void **state){
unused(state);
int32_t result;
rebrick_http_header_t *header;
result=rebrick_http_header_new(&header,"POST","/api/metrics",1,1);
assert_int_equal(result,REBRICK_SUCCESS);
assert_string_equal(header->path,"/api/metrics");
assert_string_equal(header->method,"POST");
assert_int_equal(header->major_version,1);
assert_int_equal(header->minor_version,1);
assert_int_equal(header->is_request,TRUE);
assert_null(header->headers);
assert_string_equal(header->status_code_str,"");
assert_int_equal(header->status_code,0);
rebrick_http_header_destroy(header);
}
static void rebrick_http_header_test2(void **state){
unused(state);
int32_t result;
rebrick_http_header_t *header;
result=rebrick_http_header_new2(&header,"POST",4,"/api/metrics",12,1,1);
assert_int_equal(result,REBRICK_SUCCESS);
assert_string_equal(header->path,"/api/metrics");
assert_string_equal(header->method,"POST");
assert_int_equal(header->major_version,1);
assert_int_equal(header->minor_version,1);
assert_int_equal(header->is_request,TRUE);
assert_string_equal(header->status_code_str,"");
assert_int_equal(header->status_code,0);
assert_null(header->headers);
result=rebrick_http_header_add_header(header,"content-type","application/json");
assert_int_equal(result,REBRICK_SUCCESS);
int32_t founded;
result=rebrick_http_header_contains_key(header,"content-type",&founded);
assert_int_equal(result,REBRICK_SUCCESS);
assert_int_equal(founded,TRUE);
result=rebrick_http_header_contains_key(header,"Content-Type",&founded);
assert_int_equal(result,REBRICK_SUCCESS);
assert_int_equal(founded,TRUE);
result=rebrick_http_header_remove_key(header,"content-type");
assert_int_equal(result,REBRICK_SUCCESS);
result=rebrick_http_header_contains_key(header,"content-type",&founded);
assert_int_equal(result,REBRICK_SUCCESS);
assert_int_equal(founded,FALSE);
rebrick_http_header_destroy(header);
}
static void rebrick_http_header_test3(void **state){
unused(state);
int32_t result;
rebrick_http_header_t *header;
result=rebrick_http_header_new3(&header,200,"OK",1,1);
assert_int_equal(result,REBRICK_SUCCESS);
assert_int_equal(header->major_version,1);
assert_int_equal(header->minor_version,1);
assert_int_equal(header->is_request,FALSE);
assert_null(header->headers);
assert_string_equal(header->path,"");
assert_string_equal(header->method,"");
assert_int_equal(header->status_code,200);
assert_string_equal(header->status_code_str,"OK");
rebrick_http_header_destroy(header);
}
static void rebrick_http_header_test4(void **state){
unused(state);
int32_t result;
rebrick_http_header_t *header;
result=rebrick_http_header_new4(&header,500,"INTERNAL ERROR",14,1,1);
assert_int_equal(result,REBRICK_SUCCESS);
assert_int_equal(header->major_version,1);
assert_int_equal(header->minor_version,1);
assert_int_equal(header->is_request,FALSE);
assert_null(header->headers);
assert_string_equal(header->path,"");
assert_string_equal(header->method,"");
assert_int_equal(header->status_code,500);
assert_string_equal(header->status_code_str,"INTERNAL ERROR");
rebrick_http_header_destroy(header);
}
static void rebrick_http_header_to_buffer_test(void **state){
unused(state);
int32_t result;
rebrick_http_header_t *header;
result=rebrick_http_header_new(&header,"POST","/api/metrics",1,1);
assert_int_equal(result,REBRICK_SUCCESS);
result=rebrick_http_header_add_header(header,"content-type","application/json");
assert_int_equal(result,REBRICK_SUCCESS);
result=rebrick_http_header_add_header(header,"host","hamzakilic.com");
assert_int_equal(result,REBRICK_SUCCESS);
rebrick_buffer_t *buffer;
result=rebrick_http_header_to_buffer(header,&buffer);
assert_int_equal(result,REBRICK_SUCCESS);
assert_string_equal(buffer->buf,"POST /api/metrics HTTP/1.1\r\ncontent-type:application/json\r\nhost:hamzakilic.com\r\n\r\n");
rebrick_buffer_destroy(buffer);
rebrick_http_header_destroy(header);
}
int test_rebrick_http(void) {
const struct CMUnitTest tests[] = {
cmocka_unit_test(rebrick_http_keyvalue_test),
cmocka_unit_test(rebrick_http_keyvalue_test2),
cmocka_unit_test(rebrick_http_header_test),
cmocka_unit_test(rebrick_http_header_test2),
cmocka_unit_test(rebrick_http_header_test3),
cmocka_unit_test(rebrick_http_header_test4),
cmocka_unit_test(rebrick_http_header_to_buffer_test)
};
return cmocka_run_group_tests(tests, setup, teardown);
}
<file_sep>#include "rebrick_tlssocket.h"
/**
 * @brief because this is a client (or ssl) socket we call
 * rebrick_tcpsocket_send internally; that function also takes a
 * parameter named aftersend_data and passes it to the callback
 * function.
 */
#define REBRICK_BUFFER_MALLOC_SIZE 8192
#define BUF_SIZE 8192
private_ typedef struct send_data_holder
{
base_object();
private_ rebrick_clean_func_t *client_data;
private_ void *internal_data;
private_ size_t internal_data_len;
} send_data_holder_t;
enum sslstatus
{
SSLSTATUS_OK,
SSLSTATUS_WANT_READ,
SSLSTATUS_WANT_WRITE,
SSLSTATUS_CLOSED,
SSLSTATUS_FAIL
};
static enum sslstatus get_sslstatus(SSL *ssl, int n)
{
switch (SSL_get_error(ssl, n))
{
case SSL_ERROR_NONE:
return SSLSTATUS_OK;
case SSL_ERROR_WANT_WRITE:
printf("ssl want write\n");
return SSLSTATUS_WANT_WRITE;
case SSL_ERROR_WANT_READ:
return SSLSTATUS_WANT_READ;
case SSL_ERROR_ZERO_RETURN:
return SSLSTATUS_CLOSED;
case SSL_ERROR_SYSCALL:
default:
return SSLSTATUS_FAIL;
}
}
char sslerror[4096];
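//note: returns a pointer to a static buffer, so this helper is not thread-safe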
char *getOpenSSLError()
{
BIO *bio = BIO_new(BIO_s_mem());
ERR_print_errors(bio);
char *buf;
size_t len = BIO_get_mem_data(bio, &buf);
    size_t maxlen = sizeof(sslerror);
    memset(sslerror, 0, maxlen);
    memcpy(sslerror, buf, len < maxlen ? len : (maxlen - 1));
BIO_free(bio);
return sslerror;
}
static void clean_send_data_holder(void *ptr)
{
send_data_holder_t *senddata = cast(ptr, send_data_holder_t *);
if (senddata && senddata->internal_data)
free(senddata->internal_data);
if (senddata && senddata->client_data)
{
if (senddata->client_data->func)
senddata->client_data->func(senddata->client_data->ptr);
free(senddata->client_data);
}
if (senddata)
free(senddata);
}
static int32_t flush_ssl_buffers(rebrick_tlssocket_t *tlssocket)
{
char buftemp[BUF_SIZE] = {0};
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
int32_t n;
if (!tlssocket || !tlssocket->tls)
{
rebrick_log_fatal("socket tls is null\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
do
{
n = BIO_read(tlssocket->tls->write, buftemp, sizeof(buftemp));
if (n > 0)
{
char *xbuf = malloc(n);
memcpy(xbuf, buftemp, n);
send_data_holder_t *holder = new (send_data_holder_t);
constructor(holder, send_data_holder_t);
holder->internal_data = xbuf;
holder->internal_data_len = n;
holder->client_data = NULL;
rebrick_clean_func_t cleanfunc = {.func = clean_send_data_holder, .ptr = holder};
            //send the heap copy; buftemp is a stack buffer and the write may complete asynchronously
            result = rebrick_tcpsocket_send(cast_to_tcp_socket(tlssocket), xbuf, n, cleanfunc);
if (result < 0)
{
free(xbuf);
free(holder);
return result;
}
}
else if (!BIO_should_retry(tlssocket->tls->write))
{
return REBRICK_ERR_TLS_ERR;
}
} while (n > 0);
return REBRICK_SUCCESS;
}
static int32_t check_ssl_status(rebrick_tlssocket_t *tlssocket, int32_t n)
{
char current_time_str[32] = {0};
unused(current_time_str);
// int32_t result;
enum sslstatus status;
// char buftemp[BUF_SIZE] = {0};
if (!tlssocket || !tlssocket->tls)
{
rebrick_log_fatal("socket tls is null\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
status = get_sslstatus(tlssocket->tls->ssl, n);
if (status == SSLSTATUS_WANT_READ)
{
rebrick_log_debug("ssl want read\n");
n = flush_ssl_buffers(tlssocket);
if (n < 0)
return n;
}
if (status == SSLSTATUS_WANT_WRITE)
{
rebrick_log_debug("ssl want write\n");
return REBRICK_ERR_TLS_ERR;
}
if (status == SSLSTATUS_CLOSED)
{
rebrick_log_error("ssl closed\n");
return REBRICK_ERR_TLS_CLOSED;
}
if (status == SSLSTATUS_FAIL)
{
rebrick_log_error("ssl failed\n");
return REBRICK_ERR_TLS_ERR;
}
if (!SSL_is_init_finished(tlssocket->tls->ssl))
return REBRICK_ERR_TLS_INIT_NOT_FINISHED;
return REBRICK_SUCCESS;
}
void flush_buffers(struct rebrick_tlssocket *tlssocket)
{
char current_time_str[32] = {0};
unused(current_time_str);
char buftemp[BUF_SIZE];
if (tlssocket && tlssocket->pending_write_list)
{
int32_t result;
rebrick_log_debug("pending read list try to send\n");
size_t len = 0;
int32_t error_occured = 0;
struct pending_data *el, *tmp;
DL_FOREACH_SAFE(tlssocket->pending_write_list, el, tmp)
{
char *tmpbuffer = NULL;
result = rebrick_buffers_to_array(el->data, &tmpbuffer, &len);
int32_t writen_len = 0;
int32_t temp_len = len;
error_occured = 0;
while (writen_len < temp_len)
{
int32_t n = SSL_write(tlssocket->tls->ssl, (const void *)(tmpbuffer + writen_len), temp_len - writen_len);
result = check_ssl_status(tlssocket, n);
if (result == REBRICK_ERR_TLS_ERR || result == REBRICK_ERR_TLS_CLOSED)
{
rebrick_log_error("tls failed with %d\n", result);
error_occured = 1;
free(tmpbuffer);
if (tlssocket->on_error_occured)
tlssocket->on_error_occured(cast_to_base_socket(tlssocket), tlssocket->override_callback_data, result);
break;
}
else if (result != REBRICK_SUCCESS)
{
error_occured = 1;
free(tmpbuffer);
break;
}
if (n > 0)
{
writen_len += n;
do
{
n = BIO_read(tlssocket->tls->write, buftemp, sizeof(buftemp));
if (n > 0)
{
send_data_holder_t *holder = new (send_data_holder_t);
constructor(holder, send_data_holder_t);
holder->internal_data = tmpbuffer;
holder->internal_data_len = len;
holder->client_data = el->clean_func;
rebrick_clean_func_t cleanfunc = {.func = clean_send_data_holder, .ptr = holder};
                            //source is set to 1 to mark that this is client data
cleanfunc.anydata.source = 1;
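                            //note: buftemp is a stack buffer; this assumes
                            //rebrick_tcpsocket_send consumes it before returning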
result = rebrick_tcpsocket_send(cast_to_tcp_socket(tlssocket), buftemp, n, cleanfunc);
if (result < 0)
{
free(holder);
free(tmpbuffer);
}
rebrick_buffers_destroy(el->data);
el->data = NULL;
}
else if (!BIO_should_retry(tlssocket->tls->write))
{
error_occured = 1;
break;
}
} while (n > 0);
}
}
if (!error_occured)
{
DL_DELETE(tlssocket->pending_write_list, el);
free(el);
}
else
{
break;
}
}
}
}
/**
 * @brief starts the ssl handshake (SSL_accept/SSL_connect) if it has not been initiated yet
 *
 * @param tlssocket
 * @return int32_t REBRICK_ERR_BAD_ARGUMENT,REBRICK_ERR_TLS_ERR,REBRICK_ERR_TLS_INIT_NOT_FINISHED,REBRICK_SUCCESS
 */
static int32_t ssl_handshake(rebrick_tlssocket_t *tlssocket)
{
char current_time_str[32] = {0};
unused(current_time_str);
//int32_t result;
int32_t n;
//enum sslstatus status;
//char buftemp[BUF_SIZE];
    if (!tlssocket || !tlssocket->tls)
{
rebrick_log_fatal("socket tls is null\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
if (!tlssocket->sslhandshake_initted)
{
if (tlssocket->is_server)
n = SSL_accept(tlssocket->tls->ssl);
else
n = SSL_connect(tlssocket->tls->ssl);
if (n == 1 || get_sslstatus(tlssocket->tls->ssl, n) == SSLSTATUS_WANT_READ)
{
tlssocket->sslhandshake_initted = 1;
return n;
}
return REBRICK_ERR_TLS_ERR;
}
return REBRICK_SUCCESS;
}
static void local_on_error_occured_callback(rebrick_socket_t *socket, void *callbackdata, int32_t error)
{
char current_time_str[32] = {0};
unused(current_time_str);
unused(error);
unused(callbackdata);
rebrick_tlssocket_t *tlssocket = cast(socket, rebrick_tlssocket_t *);
if (tlssocket && tlssocket->override_on_error_occured)
tlssocket->override_on_error_occured(cast_to_base_socket(tlssocket), tlssocket->override_callback_data, error);
}
#define call_after_connection(tlsserver, tlsclient) \
if (tlsserver && tlsclient && !tlsclient->called_override_after_connection_accepted && tlsclient->override_on_connection_accepted) \
{ \
tlsclient->called_override_after_connection_accepted++; \
tlsclient->override_on_connection_accepted(cast_to_base_socket(tlsserver), tlsclient->override_callback_data, &tlsclient->bind_addr.base, tlsclient); \
}
static void local_on_connection_accepted_callback(rebrick_socket_t *serversocket, void *callback_data, const struct sockaddr *addr, void *client_handle)
{
char current_time_str[32] = {0};
unused(current_time_str);
unused(addr);
unused(callback_data);
int32_t result;
rebrick_tlssocket_t *tlsserver = cast(serversocket, rebrick_tlssocket_t *);
if (!tlsserver)
{
rebrick_log_fatal("callback_data casting is null\n");
return;
}
rebrick_tlssocket_t *tlsclient = NULL;
    //if this is a server, client_handle is the newly accepted handle; otherwise it is the same as the server handle
if (tlsserver->is_server)
tlsclient = cast(client_handle, rebrick_tlssocket_t *);
else
tlsclient = tlsserver;
    //on connection, a new ssl is created for the client or the server-side client
rebrick_tls_ssl_t *tls_ssl;
    if (tlsserver->is_server && strlen(tlsserver->sni_pattern_or_name))
        result = rebrick_tls_ssl_new2(&tls_ssl, tlsserver->sni_pattern_or_name);
    else if (!tlsserver->is_server && strlen(tlsserver->sni_pattern_or_name))
        result = rebrick_tls_ssl_new3(&tls_ssl, tlsserver->tls_context, tlsserver->sni_pattern_or_name);
    else
        result = rebrick_tls_ssl_new(&tls_ssl, tlsserver->tls_context);
if (result)
{
if (tlsserver->is_server)
rebrick_tlssocket_destroy(tlsclient);
client_handle = NULL;
rebrick_log_fatal("ssl new failed for %s\n", tlsserver->tls_context->key);
if (tlsserver->override_on_error_occured)
tlsserver->override_on_error_occured(cast_to_base_socket(tlsserver), tlsserver->override_callback_data, result);
return;
}
    //parts that are not in the base class must be implemented here
tlsclient->tls_context = tlsserver->tls_context;
tlsclient->tls = tls_ssl;
    //this reference is needed to resolve sni
tlsclient->tls->ref = tlsclient;
    //valgrind reports an overlap here
if (tlsclient != tlsserver)
strncpy(tlsclient->sni_pattern_or_name, tlsserver->sni_pattern_or_name, REBRICK_TLS_SNI_MAX_LEN - 1);
tlsclient->override_on_connection_accepted = tlsserver->override_on_connection_accepted;
tlsclient->override_on_connection_closed = tlsserver->override_on_connection_closed;
tlsclient->override_on_data_received = tlsserver->override_on_data_received;
tlsclient->override_on_data_sended = tlsserver->override_on_data_sended;
tlsclient->override_on_error_occured = tlsserver->override_on_error_occured;
tlsclient->override_callback_data = tlsserver->override_callback_data;
    //for tlsclient, the callback_data is the tlsclient itself.
tlsclient->callback_data = tlsclient;
int32_t status = ssl_handshake(tlsclient);
if (status)
{
if (status == REBRICK_ERR_BAD_ARGUMENT)
{
if (tlsserver->is_server)
rebrick_tlssocket_destroy(tlsclient);
client_handle = NULL;
rebrick_log_fatal("connection accepted failed with error:%d\n", status);
if (tlsserver->override_on_error_occured)
tlsserver->override_on_error_occured(cast_to_base_socket(tlsserver), tlsserver->override_callback_data, status);
return;
}
status = check_ssl_status(tlsclient, status);
if (status == REBRICK_SUCCESS || status == REBRICK_ERR_TLS_INIT_NOT_FINISHED)
{
            //if there is no ssl problem, code that runs after each loop iteration is added
//rebrick_after_io_list_add(flush_buffers, tlsclient);
}
else
{
            //there is a null guard
            //how exactly this should be freed is an open question
if (tlsserver->is_server)
rebrick_tlssocket_destroy(tlsclient);
client_handle = NULL;
status = REBRICK_ERR_TLS_INIT;
rebrick_log_fatal("connection accepted failed with error:%d\n", status);
if (tlsserver->override_on_error_occured)
tlsserver->override_on_error_occured(cast_to_base_socket(tlsserver), tlsserver->override_callback_data, status);
return;
}
        //this is triggered when the tls client has connected successfully
call_after_connection(tlsserver, tlsclient);
}
}
static void local_on_connection_closed_callback(rebrick_socket_t *socket, void *callback_data)
{
char current_time_str[32] = {0};
unused(current_time_str);
unused(callback_data);
rebrick_tlssocket_t *tlssocket = cast(socket, rebrick_tlssocket_t *);
if (!tlssocket)
{
rebrick_log_fatal("callback_data casting is null\n");
return;
}
rebrick_after_io_list_remove(tlssocket);
rebrick_tls_ssl_destroy(tlssocket->tls);
tlssocket->tls = NULL;
pending_data_t *el, *tmp;
DL_FOREACH_SAFE(tlssocket->pending_write_list, el, tmp)
{
rebrick_buffers_destroy(el->data);
DL_DELETE(tlssocket->pending_write_list, el);
rebrick_clean_func_t *deletedata = el->clean_func;
free(el);
if (deletedata)
{
if (deletedata->func)
{
deletedata->func(deletedata->ptr);
}
free(deletedata);
}
}
if (tlssocket->override_on_connection_closed)
tlssocket->override_on_connection_closed(cast_to_base_socket(tlssocket), tlssocket->override_callback_data);
}
static void local_after_data_received_callback(rebrick_socket_t *socket, void *callback_data, const struct sockaddr *addr, const char *buffer, ssize_t len)
{
char current_time_str[32] = {0};
unused(current_time_str);
unused(callback_data);
int32_t result;
unused(result);
int32_t n;
int32_t status;
rebrick_tlssocket_t *tlssocket = cast(socket, rebrick_tlssocket_t *);
char buftemp[4096];
if (!tlssocket)
{
rebrick_log_fatal("callback_data casting is null\n");
return;
}
rebrick_buffers_t *readedbuffer = NULL;
size_t tmp_len = len;
while (tmp_len)
{
n = BIO_write(tlssocket->tls->read, buffer, tmp_len);
if (n <= 0)
{
if (BIO_should_retry(tlssocket->tls->read))
{
continue;
}
rebrick_log_error("ssl bio write failed\n");
rebrick_buffers_destroy(readedbuffer);
if (tlssocket->override_on_error_occured)
tlssocket->override_on_error_occured(cast_to_base_socket(tlssocket), tlssocket->override_callback_data, REBRICK_ERR_TLS_WRITE);
return;
}
buffer += n;
tmp_len -= n;
do
{
n = SSL_read(tlssocket->tls->ssl, buftemp, sizeof(buftemp));
if (n > 0)
{
//append the bytes that were read
if (!readedbuffer)
rebrick_buffers_new(&readedbuffer, (uint8_t *)buftemp, (size_t)n, REBRICK_BUFFER_MALLOC_SIZE);
else
rebrick_buffers_add(readedbuffer, (uint8_t *)buftemp, (size_t)n);
}
} while (n > 0);
status = check_ssl_status(tlssocket, n);
if (status == REBRICK_ERR_TLS_ERR || status == REBRICK_ERR_TLS_CLOSED)
{
rebrick_log_error("ssl status failed %d:%d\n", n, status);
rebrick_buffers_destroy(readedbuffer);
if (tlssocket->override_on_error_occured)
tlssocket->override_on_error_occured(cast_to_base_socket(tlssocket), tlssocket->override_callback_data, status);
return;
}
}
if (tlssocket->override_on_data_received)
{
size_t array_len = 0;
char *array;
result = rebrick_buffers_to_array(readedbuffer, &array, &array_len);
if (array_len)
{
tlssocket->override_on_data_received(cast_to_base_socket(tlssocket), tlssocket->override_callback_data, addr, array, array_len);
free(array);
}
}
rebrick_buffers_destroy(readedbuffer);
flush_buffers(tlssocket);
}
static void local_on_data_sended_callback(rebrick_socket_t *socket, void *callback_data, void *source)
{
char current_time_str[32] = {0};
unused(current_time_str);
unused(callback_data);
rebrick_tlssocket_t *tlssocket = cast(socket, rebrick_tlssocket_t *);
if (!tlssocket)
{
rebrick_log_fatal("callback_data casting is null\n");
return;
}
//this is important when we flush the ssl buffer
//flush_buffers(tlssocket);
if (source) //if the sent data is client data
if (tlssocket->override_on_data_sended)
tlssocket->override_on_data_sended(cast_to_base_socket(tlssocket), tlssocket->override_callback_data, NULL);
}
/**
* @brief this function creates a new instance of current instance
* this is function overloading
* @return struct rebrick_tcpsocket*
*/
static struct rebrick_tcpsocket *local_create_client()
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_tlssocket_t *client = new (rebrick_tlssocket_t);
constructor(client, rebrick_tlssocket_t);
return cast(client, rebrick_tcpsocket_t *);
}
int32_t rebrick_tlssocket_init(rebrick_tlssocket_t *tlssocket, const rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient, rebrick_tcpsocket_create_client_t create_client)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
if (!tls_context)
{
rebrick_log_fatal("tls context is null\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
if (rebrick_tls_context_is_server(tls_context) && !backlog_or_isclient)
{
rebrick_log_fatal("tls context is server but backlog_or_isclient parameter is 0\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
if (!rebrick_tls_context_is_server(tls_context) && backlog_or_isclient)
{
rebrick_log_fatal("tls context is client but backlog_or_isclient parameter is server > 0\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
tlssocket->is_server = backlog_or_isclient;
tlssocket->override_on_connection_accepted = on_connection_accepted;
tlssocket->override_on_connection_closed = on_connection_closed;
tlssocket->override_on_data_received = on_data_received;
tlssocket->override_on_data_sended = on_data_sended;
tlssocket->override_callback_data = callback_data;
tlssocket->override_on_error_occured = on_error_occured;
tlssocket->tls_context = tls_context;
//this is OOP inheritance with c
//base class init function call.
result = rebrick_tcpsocket_init(cast_to_tcp_socket(tlssocket), addr, tlssocket, local_on_connection_accepted_callback,
local_on_connection_closed_callback, local_after_data_received_callback, local_on_data_sended_callback, local_on_error_occured_callback, backlog_or_isclient, create_client);
if (result)
{
int32_t uv_err = HAS_UV_ERR(result) ? UV_ERR(result) : 0;
rebrick_log_fatal("tcpsocket create failed with result:%d %s\n", result, uv_strerror(uv_err));
return result;
}
return REBRICK_SUCCESS;
}
int32_t rebrick_tlssocket_new(rebrick_tlssocket_t **socket, const rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
rebrick_tlssocket_t *tlssocket = new (rebrick_tlssocket_t);
constructor(tlssocket, rebrick_tlssocket_t);
result = rebrick_tlssocket_init(tlssocket, tls_context, addr, callback_data, on_connection_accepted, on_connection_closed, on_data_received, on_data_sended, on_error_occured, backlog_or_isclient, local_create_client);
if (result < 0)
{
free(tlssocket);
rebrick_log_error("tls socket init failed with:%d\n", result);
return result;
}
*socket = tlssocket;
return REBRICK_SUCCESS;
}
int32_t rebrick_tlssocket_new2(rebrick_tlssocket_t **socket, const char *sni_pattern_or_name, rebrick_tls_context_t *tlscontext, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
rebrick_tls_context_t *sni_context;
//if tlscontext is null, use default SNI context
if (tlscontext)
sni_context = tlscontext;
else
{
result = rebrick_tls_context_get(REBRICK_TLS_CONTEXT_SNI, &sni_context);
if (result < 0)
{
rebrick_log_fatal("sni tls context not found\n");
return result;
}
}
rebrick_tlssocket_t *tlssocket = new (rebrick_tlssocket_t);
constructor(tlssocket, rebrick_tlssocket_t);
result = rebrick_tlssocket_init(tlssocket, sni_context, addr, callback_data, on_connection_accepted, on_connection_closed, on_data_received, on_data_sended, on_error_occured, backlog_or_isclient, local_create_client);
if (result < 0)
{
free(tlssocket);
rebrick_log_error("tls socket init failed with:%d\n", result);
return result;
}
//thanks to this, a new ...
if(sni_pattern_or_name)
snprintf(tlssocket->sni_pattern_or_name, REBRICK_TLS_SNI_MAX_LEN, "%s", sni_pattern_or_name);
*socket = tlssocket;
return REBRICK_SUCCESS;
}
int32_t rebrick_tlssocket_destroy(rebrick_tlssocket_t *socket)
{
char current_time_str[32] = {0};
unused(current_time_str);
if (socket)
{
//no other code needs to be written here
if (socket->parent_socket)
{
int32_t result = SSL_shutdown(socket->tls->ssl);
check_ssl_status(socket, result);
}
else
{
rebrick_tcpsocket_t *el, *tmp;
DL_FOREACH_SAFE(socket->clients, el, tmp)
{
rebrick_tlssocket_t *tsocket = cast(el, rebrick_tlssocket_t *);
int32_t result = SSL_shutdown(tsocket->tls->ssl);
check_ssl_status(tsocket, result);
}
}
rebrick_tcpsocket_destroy(cast_to_tcp_socket(socket));
//do not free(socket) here, tcpsocket already does it
}
return REBRICK_SUCCESS;
}
int32_t rebrick_tlssocket_send(rebrick_tlssocket_t *socket, char *buffer, size_t len, rebrick_clean_func_t cleanfuncs)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
int32_t n;
unused(result);
char buftemp[BUF_SIZE];
if (uv_is_closing(cast(&socket->handle.tcp, uv_handle_t *)))
{
return REBRICK_ERR_IO_CLOSED;
}
rebrick_buffers_t *buffertmp = NULL;
int32_t writen_len = 0;
int32_t temp_len = len;
while (writen_len < temp_len)
{
n = SSL_write(socket->tls->ssl, (const void *)(buffer + writen_len), temp_len - writen_len);
result = check_ssl_status(socket, n);
if (n > 0)
{
writen_len += n;
do
{
n = BIO_read(socket->tls->write, buftemp, sizeof(buftemp));
if (n > 0)
{
if (!buffertmp)
rebrick_buffers_new(&buffertmp, (uint8_t *)buftemp, (size_t)n, REBRICK_BUFFER_MALLOC_SIZE);
else
rebrick_buffers_add(buffertmp, (uint8_t *)buftemp, (size_t)n);
}
else if (!BIO_should_retry(socket->tls->write))
{
return REBRICK_ERR_TLS_ERR;
}
} while (n > 0);
}
else if (result == REBRICK_ERR_TLS_INIT_NOT_FINISHED)
{
//if ssl is not ready yet, queue the data and write it later
pending_data_t *data = new (pending_data_t);
constructor(data, pending_data_t);
rebrick_buffers_new(&data->data, (uint8_t *)(buffer + writen_len), (size_t)(temp_len - writen_len), REBRICK_BUFFER_MALLOC_SIZE);
rebrick_clean_func_clone(&cleanfuncs, data->clean_func);
DL_APPEND(socket->pending_write_list, data);
break;
}
else if (result == REBRICK_ERR_TLS_ERR || result == REBRICK_ERR_TLS_CLOSED)
{
rebrick_log_error("tls failed\n");
rebrick_buffers_destroy(buffertmp);
return result;
}
}
result = REBRICK_SUCCESS;
if (buffertmp)
{
char *tmpbuffer = NULL;
size_t tmplen = 0;
rebrick_buffers_to_array(buffertmp, &tmpbuffer, &tmplen);
if (tmplen)
{
send_data_holder_t *holder = new (send_data_holder_t);
constructor(holder, send_data_holder_t);
holder->internal_data = tmpbuffer;
holder->internal_data_len = tmplen;
rebrick_clean_func_clone(&cleanfuncs, holder->client_data);
rebrick_clean_func_t cleanfunc = {.func = clean_send_data_holder, .ptr = holder};
//source is set to 1 to mark this as client data
cleanfunc.anydata.source = 1;
result = rebrick_tcpsocket_send(cast_to_tcp_socket(socket), tmpbuffer, tmplen, cleanfunc);
if (result < 0)
{
free(holder);
free(tmpbuffer);
}
}
rebrick_buffers_destroy(buffertmp);
}
//flush_buffers(socket);
return result;
}
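//Usage sketch (illustrative only, not from the original sources): sending a
//heap buffer over an already-connected tls client socket and releasing it
//from the clean function once the write completes. The connected `client`
//socket and the event-loop setup are assumed to exist elsewhere.
//
//  char *payload = strdup("hello over tls");
//  rebrick_clean_func_t clean = {.func = free, .ptr = payload};
//  int32_t res = rebrick_tlssocket_send(client, payload, strlen(payload), clean);
//  if (res < 0)
//      free(payload); //on failure the caller still owns the buffer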
int32_t rebrick_tlssocket_change_context(rebrick_tlssocket_t *socket, const char *servername)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
if (!socket || !servername)
{
rebrick_log_error("socket or servername is null\n");
return REBRICK_ERR_BAD_ARGUMENT;
}
rebrick_tls_context_t *context;
result = rebrick_tls_context_get(servername, &context);
if (result < 0)
{
rebrick_log_error("error at finding context for servername:%s\n ", servername);
return result;
}
strncpy(socket->sni, servername, REBRICK_TLS_SNI_MAX_LEN - 1);
socket->tls_context = context;
SSL_set_SSL_CTX(socket->tls->ssl, context->tls_ctx);
return REBRICK_SUCCESS;
}<file_sep>#include "rebrick_httpsocket.h"
static void local_on_error_occured_callback(rebrick_socket_t *ssocket, void *callbackdata, int error)
{
unused(ssocket);
unused(callbackdata);
unused(error);
rebrick_httpsocket_t *httpsocket = cast(ssocket, rebrick_httpsocket_t *);
if (httpsocket)
{
if (httpsocket->override_override_on_error_occured)
httpsocket->override_override_on_error_occured(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, error);
}
}
static void local_on_connection_accepted_callback(rebrick_socket_t *ssocket, void *callback_data, const struct sockaddr *addr, void *client_handle)
{
unused(ssocket);
unused(callback_data);
unused(addr);
unused(client_handle);
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
rebrick_httpsocket_t *httpsocket = cast(ssocket, rebrick_httpsocket_t *);
if (httpsocket)
{
if (httpsocket->override_override_on_connection_accepted)
httpsocket->override_override_on_connection_accepted(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, addr, client_handle);
}
}
static void local_on_connection_closed_callback(rebrick_socket_t *ssocket, void *callback_data)
{
unused(ssocket);
unused(callback_data);
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
rebrick_httpsocket_t *httpsocket = cast(ssocket, rebrick_httpsocket_t *);
if (httpsocket)
{
if (httpsocket->tmp_buffer)
rebrick_buffer_destroy(httpsocket->tmp_buffer);
if (httpsocket->header)
rebrick_http_header_destroy(httpsocket->header);
if (httpsocket->override_override_on_connection_closed)
httpsocket->override_override_on_connection_closed(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data);
}
}
static void local_on_data_sended_callback(rebrick_socket_t *ssocket, void *callback_data, void *source)
{
unused(ssocket);
unused(callback_data);
unused(source);
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
rebrick_httpsocket_t *httpsocket = cast(ssocket, rebrick_httpsocket_t *);
if (httpsocket)
{
if (httpsocket->override_override_on_data_sended)
httpsocket->override_override_on_data_sended(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, source);
}
}
#define call_on_error(httpsocket, error) \
if (httpsocket->override_override_on_error_occured) \
{ \
httpsocket->override_override_on_error_occured(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, error); \
}
static void local_after_data_received_callback(rebrick_socket_t *socket, void *callback_data, const struct sockaddr *addr, const char *buffer, ssize_t len)
{
unused(socket);
unused(callback_data);
unused(addr);
unused(buffer);
unused(len);
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
    if (!socket)
    {
        rebrick_log_fatal("socket argument is null\n");
        return;
    }
rebrick_httpsocket_t *httpsocket = cast_to_http_socket(socket);
if (httpsocket->override_override_on_data_received)
httpsocket->override_override_on_data_received(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, addr, buffer, len);
if (httpsocket->is_header_parsed)
{
if (httpsocket->on_http_body_received)
{
httpsocket->content_received_length += len;
httpsocket->on_http_body_received(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, addr,
buffer, len);
}
}
else
{
if (httpsocket->tmp_buffer)
{
result = rebrick_buffer_add(httpsocket->tmp_buffer, cast(buffer, uint8_t *), len);
}
else
{
result = rebrick_buffer_new(&httpsocket->tmp_buffer, cast(buffer, uint8_t *), len, REBRICK_HTTP_BUFFER_MALLOC);
}
if (result < 0)
{
call_on_error(httpsocket, result);
return;
}
httpsocket->parsing_params.num_headers = sizeof(httpsocket->parsing_params.headers) / sizeof(httpsocket->parsing_params.headers[0]);
int32_t pret = 0;
int32_t is_request_header = FALSE;
//check request or response
        if (httpsocket->tmp_buffer->len < 5)
        {
            rebrick_log_fatal("httpsocket tmp buffer len is < 5\n");
            return;
        }
        //case-insensitive check whether the buffered data starts like a response ("HTTP/")
        if ((httpsocket->header == NULL && strncasecmp(cast(httpsocket->tmp_buffer->buf, const char *), "HTTP/", 5) == 0) || (httpsocket->header && !httpsocket->header->is_request))
{
pret = phr_parse_response(cast(httpsocket->tmp_buffer->buf, const char *),
httpsocket->tmp_buffer->len,
&httpsocket->parsing_params.minor_version,
&httpsocket->parsing_params.status,
&httpsocket->parsing_params.status_msg,
&httpsocket->parsing_params.status_msg_len,
httpsocket->parsing_params.headers,
&httpsocket->parsing_params.num_headers, httpsocket->parsing_params.pos);
is_request_header = FALSE;
}
else
{
is_request_header = TRUE;
pret = phr_parse_request(cast(httpsocket->tmp_buffer->buf, const char *),
httpsocket->tmp_buffer->len,
&httpsocket->parsing_params.method, &httpsocket->parsing_params.method_len,
&httpsocket->parsing_params.path, &httpsocket->parsing_params.path_len,
&httpsocket->parsing_params.minor_version,
httpsocket->parsing_params.headers, &httpsocket->parsing_params.num_headers, httpsocket->parsing_params.pos);
}
        if (pret == -1)
        {
            rebrick_log_error("header parse error\n");
            call_on_error(httpsocket, REBRICK_ERR_HTTP_HEADER_PARSE);
            return;
        }
        if (httpsocket->tmp_buffer->len >= REBRICK_HTTP_MAX_HEADER_LEN)
        {
            rebrick_log_error("http max header len exceeded\n");
            call_on_error(httpsocket, REBRICK_ERR_LEN_NOT_ENOUGH);
            return;
        }
httpsocket->parsing_params.pos = pret;
if (pret > 0)
{
if (!httpsocket->header)
{
if (is_request_header)
{
result = rebrick_http_header_new2(&httpsocket->header,
httpsocket->parsing_params.method,
httpsocket->parsing_params.method_len,
httpsocket->parsing_params.path,
httpsocket->parsing_params.path_len,
httpsocket->parsing_params.minor_version == 1 ? 1 : 2,
httpsocket->parsing_params.minor_version);
}
else
{
result = rebrick_http_header_new4(&httpsocket->header,
httpsocket->parsing_params.status,
httpsocket->parsing_params.status_msg,
httpsocket->parsing_params.status_msg_len,
httpsocket->parsing_params.minor_version == 1 ? 1 : 2,
httpsocket->parsing_params.minor_version);
}
if (result < 0)
{
rebrick_log_error("new header create error\n");
call_on_error(httpsocket, REBRICK_ERR_HTTP_HEADER_PARSE);
}
}
for (size_t i = 0; i < httpsocket->parsing_params.num_headers; ++i)
{
struct phr_header *header = httpsocket->parsing_params.headers + i;
result = rebrick_http_header_add_header2(httpsocket->header, header->name, header->name_len, header->value, header->value_len);
if (result < 0)
{
rebrick_log_error("adding header to headers error\n");
call_on_error(httpsocket, REBRICK_ERR_HTTP_HEADER_PARSE);
}
}
httpsocket->is_header_parsed = TRUE;
httpsocket->header_len = pret;
//http header finished
if (httpsocket->on_http_header_received)
httpsocket->on_http_header_received(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, httpsocket->header);
//if there is data after header parsed in buffer
//call on_http_body
if (cast(httpsocket->tmp_buffer->len, ssize_t) > pret)
{
if (httpsocket->on_http_body_received)
{
size_t length_remain = httpsocket->tmp_buffer->len - pret;
size_t offset = httpsocket->tmp_buffer->len - length_remain;
httpsocket->content_received_length += length_remain;
httpsocket->on_http_body_received(cast_to_base_socket(httpsocket), httpsocket->override_override_callback_data, addr,
cast(httpsocket->tmp_buffer->buf + offset, char *), length_remain);
}
}
}
}
}
static struct rebrick_tcpsocket *local_create_client()
{
char current_time_str[32] = {0};
unused(current_time_str);
rebrick_httpsocket_t *client = new (rebrick_httpsocket_t);
constructor(client, rebrick_httpsocket_t);
return cast(client, rebrick_tcpsocket_t *);
}
int32_t rebrick_httpsocket_init(rebrick_httpsocket_t *httpsocket, rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient,
rebrick_on_http_header_received_callback_t on_http_header_received,
rebrick_on_http_body_received_callback_t on_http_body_received, rebrick_tcpsocket_create_client_t create_client)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
httpsocket->override_override_tls_context = tls_context;
if (tls_context)
{
result = rebrick_tlssocket_init(cast_to_tls_socket(httpsocket), tls_context, addr, NULL, local_on_connection_accepted_callback, local_on_connection_closed_callback, local_after_data_received_callback, local_on_data_sended_callback, local_on_error_occured_callback, backlog_or_isclient, create_client);
}
else
{
result = rebrick_tcpsocket_init(cast_to_tcp_socket(httpsocket), addr, NULL, local_on_connection_accepted_callback, local_on_connection_closed_callback, local_after_data_received_callback, local_on_data_sended_callback, local_on_error_occured_callback, backlog_or_isclient, create_client);
}
if (result < 0)
{
rebrick_log_error("http socket creation failed with eror:%d\n", result);
return result;
}
httpsocket->override_override_on_connection_accepted = on_connection_accepted;
httpsocket->override_override_on_connection_closed = on_connection_closed;
httpsocket->override_override_on_data_received = on_data_received;
httpsocket->override_override_on_data_sended = on_data_sended;
httpsocket->override_override_on_error_occured = on_error_occured;
httpsocket->override_override_callback_data = callback_data;
httpsocket->on_http_header_received = on_http_header_received;
httpsocket->on_http_body_received = on_http_body_received;
return REBRICK_SUCCESS;
}
int32_t rebrick_httpsocket_new(rebrick_httpsocket_t **socket, rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient,
rebrick_on_http_header_received_callback_t on_http_header_received,
rebrick_on_http_body_received_callback_t on_http_body_received)
{
char current_time_str[32] = {0};
unused(current_time_str);
int32_t result;
unused(result);
rebrick_httpsocket_t *httpsocket = new (rebrick_httpsocket_t);
constructor(httpsocket, rebrick_httpsocket_t);
result = rebrick_httpsocket_init(httpsocket, tls_context, addr,
callback_data, on_connection_accepted, on_connection_closed, on_data_received, on_data_sended, on_error_occured, backlog_or_isclient,
on_http_header_received, on_http_body_received, local_create_client);
if (result < 0)
{
rebrick_log_error("http socket init failed with error:%d\n", result);
free(httpsocket);
return result;
}
*socket = httpsocket;
return REBRICK_SUCCESS;
}
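//Usage sketch under stated assumptions: creating a plain (non-tls) http
//client socket by passing a NULL tls_context, which selects the tcp code
//path above. The on_* callbacks are hypothetical caller-defined functions;
//uv_ip4_addr comes from libuv and fills the v4 member of rebrick_sockaddr_t.
//
//  rebrick_httpsocket_t *client = NULL;
//  rebrick_sockaddr_t dst;
//  uv_ip4_addr("127.0.0.1", 8080, &dst.v4);
//  int32_t res = rebrick_httpsocket_new(&client, NULL, dst, NULL,
//                                       NULL, on_closed, NULL, NULL, on_error,
//                                       0, on_header_received, on_body_received);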
int32_t rebrick_httpsocket_destroy(rebrick_httpsocket_t *socket)
{
unused(socket);
if (socket)
{
if (socket->override_override_tls_context)
{
return rebrick_tlssocket_destroy(cast_to_tls_socket(socket));
}
else
{
return rebrick_tcpsocket_destroy(cast_to_tcp_socket(socket));
}
}
return REBRICK_SUCCESS;
}
int32_t rebrick_httpsocket_reset(rebrick_httpsocket_t *socket)
{
if (socket)
{
if (socket->tmp_buffer)
rebrick_buffer_destroy(socket->tmp_buffer);
socket->tmp_buffer = NULL;
if (socket->header)
rebrick_http_header_destroy(socket->header);
socket->header = NULL;
socket->is_header_parsed = FALSE;
socket->content_received_length = 0;
socket->header_len=0;
}
return REBRICK_SUCCESS;
}
int32_t rebrick_httpsocket_send(rebrick_httpsocket_t *socket, char *buffer, size_t len, rebrick_clean_func_t cleanfunc)
{
unused(socket);
unused(buffer);
unused(len);
unused(cleanfunc);
    if (!socket || !buffer || !len)
return REBRICK_ERR_BAD_ARGUMENT;
if (socket->tls)
return rebrick_tlssocket_send(cast_to_tls_socket(socket), buffer, len, cleanfunc);
return rebrick_tcpsocket_send(cast_to_tcp_socket(socket), buffer, len, cleanfunc);
}<file_sep>#include "./common/rebrick_common.h"
#include "./common/rebrick_log.h"
#include "./common/rebrick_tls.h"
int main(){
char current_time_str[32]={0};
unused(current_time_str);
return 0;
}
<file_sep>#ifndef __REBRICK_HTTP_H__
#define __REBRICK_HTTP_H__
#include "../socket/rebrick_tlssocket.h"
#include "../common/rebrick_buffer.h"
#include "../lib/picohttpparser.h"
#include "../lib/uthash.h"
#define REBRICK_HTTP_VERSION1 1
#define REBRICK_HTTP_VERSION2 2
#define REBRICK_HTTP_BUFFER_MALLOC 8192
#define REBRICK_HTTP_MAX_HEADER_LEN 8192
#define REBRICK_HTTP_MAX_HEADER_KEY_LEN 128
#define REBRICK_HTTP_MAX_HOSTNAME_LEN 1024
#define REBRICK_HTTP_MAX_URI_LEN 8192
#define REBRICK_HTTP_MAX_PATH_LEN 8192
#define REBRICK_HTTP_MAX_METHOD_LEN 16
#define REBRICK_HTTP_MAX_STATUSCODE_LEN 64
#define REBRICK_HTTP_MAX_HEADERS 96
public_ typedef struct rebrick_http_key_value{
public_ readonly_ char *key;
public_ size_t keylen;
public_ readonly_ char *key_lower;
public_ readonly_ char *value;
public_ size_t valuelen;
UT_hash_handle hh;
}rebrick_http_key_value_t;
int32_t rebrick_http_key_value_new(rebrick_http_key_value_t **keyvalue,const char *key,const char *value);
int32_t rebrick_http_key_value_new2(rebrick_http_key_value_t **keyvalue,const void *key,size_t keylen,const void *value,size_t valuelen);
int32_t rebrick_http_key_value_destroy(rebrick_http_key_value_t *keyvalue);
public_ typedef struct rebrick_http_header{
base_object();
public_ char path[REBRICK_HTTP_MAX_PATH_LEN];
public_ char method[REBRICK_HTTP_MAX_METHOD_LEN];
public_ int8_t major_version;
public_ int8_t minor_version;
public_ int16_t status_code;
public_ char status_code_str[REBRICK_HTTP_MAX_STATUSCODE_LEN];
public_ rebrick_http_key_value_t *headers;
public_ int32_t is_request;
}rebrick_http_header_t;
int32_t rebrick_http_header_new(rebrick_http_header_t **header,const char *method,const char *path,int8_t major,int8_t minor);
int32_t rebrick_http_header_new2(rebrick_http_header_t **header,const void *method,size_t method_len,const void *path,size_t path_len,int8_t major,int8_t minor);
int32_t rebrick_http_header_new3(rebrick_http_header_t **header,int32_t status,const char *status_code,int8_t major,int8_t minor);
int32_t rebrick_http_header_new4(rebrick_http_header_t **header,int32_t status,const void *status_code,size_t status_code_len,int8_t major,int8_t minor);
int32_t rebrick_http_header_add_header(rebrick_http_header_t *header,const char *key,const char*value);
int32_t rebrick_http_header_add_header2(rebrick_http_header_t *header,const char *key,size_t keylen,const char*value,size_t valuelen);
int32_t rebrick_http_header_contains_key(rebrick_http_header_t *header,const char *key,int32_t *founded);
int32_t rebrick_http_header_get_header(rebrick_http_header_t *header,const char *key,const char **value);
int32_t rebrick_http_header_remove_key(rebrick_http_header_t *header,const char *key);
int32_t rebrick_http_header_destroy(rebrick_http_header_t *header);
int32_t rebrick_http_header_to_buffer(rebrick_http_header_t *header,rebrick_buffer_t **buffer);
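//Illustrative sketch of composing a request header with this API; error
//checks are elided and the exact serialized bytes depend on
//rebrick_http_header_to_buffer's implementation (not shown in this header).
//
//  rebrick_http_header_t *header = NULL;
//  rebrick_http_header_new(&header, "GET", "/index.html", 1, 1);
//  rebrick_http_header_add_header(header, "host", "example.com");
//  rebrick_buffer_t *buffer = NULL;
//  rebrick_http_header_to_buffer(header, &buffer);
//  //... write buffer->buf (buffer->len bytes), then clean up:
//  rebrick_buffer_destroy(buffer);
//  rebrick_http_header_destroy(header);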
/* public_ typedef struct rebrick_http_body{
base_object();
public_ rebrick_buffers_t *body;
}rebrick_http_body_t; */
#endif<file_sep>#include "./common/rebrick_context.h"
#include "cmocka.h"
static int setup(void**state){
unused(state);
fprintf(stdout,"**** %s ****\n",__FILE__);
return 0;
}
static int teardown(void **state){
unused(state);
return 0;
}
static void context_object_create(void **start){
unused(start);
rebrick_context_t *context=NULL;
//set some ptr;
void *ptr=(void*)10;
int32_t result=rebrick_context_new(&context,ptr,ptr);
assert_true(result==0);
assert_non_null(context);
assert_non_null(context->config);
assert_non_null(context->metrics);
rebrick_context_destroy(context);
}
int test_rebrick_context(void) {
const struct CMUnitTest tests[] = {
cmocka_unit_test(context_object_create)
};
return cmocka_run_group_tests(tests, setup, teardown);
}
<file_sep>#ifndef __REBRICK_TLSSOCKET_H__
#define __REBRICK_TLSSOCKET_H__
#include "../common/rebrick_tls.h"
#include "./rebrick_tcpsocket.h"
#include "../common/rebrick_buffers.h"
/**
 * @brief with openssl, writes cannot always complete immediately, so data sometimes has to be queued as pending
*
*/
protected_ typedef struct pending_data{
base_object();
rebrick_buffers_t *data;
/**
* @brief clean function
*
*/
rebrick_clean_func_t *clean_func;
/**
* @brief this fields prev and next means, this is a list
*
*/
struct pending_data *prev,*next;
}pending_data_t;
#define base_ssl_socket() \
base_tcp_socket(); \
private_ const rebrick_tls_context_t *tls_context; \
private_ rebrick_tls_ssl_t *tls; \
private_ rebrick_on_connection_accepted_callback_t override_on_connection_accepted; \
private_ rebrick_on_connection_closed_callback_t override_on_connection_closed; \
private_ rebrick_on_data_received_callback_t override_on_data_received; \
private_ rebrick_on_data_sended_callback_t override_on_data_sended; \
private_ rebrick_on_error_occured_callback_t override_on_error_occured;\
private_ void *override_callback_data; \
private_ pending_data_t *pending_write_list; \
private_ int32_t called_override_after_connection_accepted; \
public_ readonly_ int32_t is_server;\
private_ int32_t sslhandshake_initted; \
public_ readonly_ char sni_pattern_or_name[REBRICK_TLS_SNI_MAX_LEN];\
public_ readonly_ char sni[REBRICK_TLS_SNI_MAX_LEN];
public_ typedef struct rebrick_tlssocket
{
base_ssl_socket()
} rebrick_tlssocket_t;
#define cast_to_tls_socket(x) cast(x, rebrick_tlssocket_t *)
/**
* @brief
*
* @param socket socket pointer
* @param tls_context tls context
* @param bind_addr bind address and port
* @param dst_addr destination address and port, if port is zero then only listening socket opens
* @param callback_data, callback data parameter for every callback
* @param on_data_received data received callback
* @param on_data_sended
* @return int32_t
*/
int32_t rebrick_tlssocket_new(rebrick_tlssocket_t **socket, const rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient);
/*
* @brief creates a tls socket with SNI(server name indication) pattern
*
* @param socket socket pointer
* @param sni pattern or name for finding client sni or setting client sni
* @param dst_addr destination address and port, if port is zero then only listening socket opens
* @param callback_data, callback data parameter for every callback
* @param on_data_received data received callback
* @param on_data_sended
* @return int32_t
*/
int32_t rebrick_tlssocket_new2(rebrick_tlssocket_t **socket, const char *sni_pattern_or_name, rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient);
/**
* @brief inits a socket, think init functions like contructors in OOP
*
* @param socket
* @param tls_context
* @param addr
* @param callback_data
* @param on_connection_accepted
* @param on_connection_closed
* @param on_data_received
* @param on_data_sended
* @param on_error_occured
* @param backlog_or_isclient
* @param create_client
* @return int32_t
*/
int32_t rebrick_tlssocket_init(rebrick_tlssocket_t *socket, const rebrick_tls_context_t *tls_context, rebrick_sockaddr_t addr, void *callback_data,
rebrick_on_connection_accepted_callback_t on_connection_accepted,
rebrick_on_connection_closed_callback_t on_connection_closed,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured, int32_t backlog_or_isclient,rebrick_tcpsocket_create_client_t create_client);
int32_t rebrick_tlssocket_destroy(rebrick_tlssocket_t *socket);
int32_t rebrick_tlssocket_send(rebrick_tlssocket_t *socket, char *buffer, size_t len, rebrick_clean_func_t cleanfunc);
int32_t rebrick_tlssocket_change_context(rebrick_tlssocket_t *socket,const char *servername);
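//Illustrative server-side sketch (not from the original sources): a listening
//tls socket whose certificate context is picked per connection through SNI.
//Passing NULL as tls_context makes rebrick_tlssocket_new2 fall back to the
//registered REBRICK_TLS_CONTEXT_SNI context (see the implementation); the
//on_* callbacks are hypothetical caller-defined functions and backlog 100
//marks the socket as a server.
//
//  rebrick_tlssocket_t *server = NULL;
//  rebrick_sockaddr_t listen_addr;
//  uv_ip4_addr("0.0.0.0", 443, &listen_addr.v4);
//  int32_t res = rebrick_tlssocket_new2(&server, "*.example.com", NULL, listen_addr, NULL,
//                                       on_accepted, on_closed, on_data, NULL, on_error, 100);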
#endif<file_sep>#ifndef __REBRICK_UDPSOCKET_H__
#define __REBRICK_UDPSOCKET_H__
#include "rebrick_socket.h"
public_ typedef struct rebrick_udpsocket
{
base_socket();
} rebrick_udpsocket_t;
/**
* @brief
*
* @param socket socket pointer
* @param bind_addr bind address and port
* @param dst_addr destination address and port, if port is zero then only listening socket opens
* @param callback_data, callback data parameter for every callback
* @param on_data_received data received callback
* @param on_data_sended
* @return int32_t
*/
int32_t rebrick_udpsocket_new(rebrick_udpsocket_t **socket,
rebrick_sockaddr_t bind_addr,
void *callback_data,
rebrick_on_data_received_callback_t on_data_received,
rebrick_on_data_sended_callback_t on_data_sended,
rebrick_on_error_occured_callback_t on_error_occured);
int32_t rebrick_udpsocket_destroy(rebrick_udpsocket_t *socket);
int32_t rebrick_udpsocket_send(rebrick_udpsocket_t *socket, rebrick_sockaddr_t *dst_addr, char *buffer, size_t len, rebrick_clean_func_t clean_func);
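//Minimal sketch of a udp sender built on this API (whether NULL callbacks are
//tolerated is an assumption here, not a documented guarantee):
//
//  rebrick_udpsocket_t *sock = NULL;
//  rebrick_sockaddr_t bind_addr, dst;
//  uv_ip4_addr("0.0.0.0", 0, &bind_addr.v4);
//  uv_ip4_addr("127.0.0.1", 9090, &dst.v4);
//  rebrick_udpsocket_new(&sock, bind_addr, NULL, on_received, NULL, on_error);
//  static char msg[] = "ping";
//  rebrick_clean_func_t noop = {0};
//  rebrick_udpsocket_send(sock, &dst, msg, sizeof(msg) - 1, noop);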
#endif<file_sep>CFLAGS = -Wall -W -O0 -g -ggdb -std=gnu11 -I$(shell pwd)/../external/libs/include -DREBRICK_DEBUG
LDFLAGS = -L$(shell pwd)/../external/libs/lib -luv -lssl -lcrypto -lnghttp2
CFLAGSTEST = -Wall -Wno-unused-function -W -O0 -g -ggdb -std=gnu11 -I$(shell pwd)/../src -I$(shell pwd)/../external/libs/include -DREBRICK_DEBUG2
LDFLAGSTEST = -L$(shell pwd)/../external/libs/lib -lcmocka -luv -lpthread -lssl -lcrypto
OUTPUT = rebrick
SRC = src
TEST = test
OBJS = main_rebrick.o ./common/rebrick_util.o ./common/rebrick_config.o ./socket/rebrick_udpsocket.o ./socket/rebrick_tcpsocket.o ./common/rebrick_tls.o \
./socket/rebrick_tlssocket.o ./http/rebrick_http.o ./http/rebrick_httpsocket.o \
./common/rebrick_context.o ./common/rebrick_metrics.o ./common/rebrick_buffers.o ./common/rebrick_buffer.o ./lib/b64/decode.o ./lib/b64/encode.o ./lib/picohttpparser.o
OBJSTEST = test.o ./server_client/udpecho.o ./server_client/tcpecho.o test_rebrick_util.o \
test_rebrick_config.o test_rebrick_context.o test_rebrick_metrics.o \
test_rebrick_tls.o \
test_rebrick_udpsocket.o test_rebrick_tcpsocket.o test_rebrick_tlssocket.o test_rebrick_http.o test_rebrick_httpsocket.o test_rebrick_buffer.o test_rebrick_buffers.o \
../src/common/rebrick_config.o ../src/common/rebrick_util.o ../src/common/rebrick_context.o ../src/common/rebrick_metrics.o \
../src/socket/rebrick_udpsocket.o ../src/socket/rebrick_tcpsocket.o ../src/common/rebrick_buffer.o ../src/common/rebrick_buffers.o\
../src/lib/b64/encode.o ../src/lib/b64/decode.o ../src/lib/picohttpparser.o \
../src/common/rebrick_tls.o ../src/socket/rebrick_tlssocket.o ../src/http/rebrick_http.o ../src/http/rebrick_httpsocket.o
ifeq ($(TEST),TRUE)
%.o: %.c
$(CC) -c -o $@ $< $(CFLAGSTEST)
else
%.o: %.c
$(CC) -c -o $@ $< $(CFLAGS)
endif
all:clean
@cd $(SRC) && make -f ../Makefile $(OUTPUT)
rebrick : $(OBJS)
$(CC) -o $(OUTPUT) $(OBJS) $(LDFLAGS)
check:clean
@cd $(TEST) && make TEST=TRUE -f ../Makefile testrun
checkvalgrind:clean
@cd $(TEST) && make TEST=TRUE -f ../Makefile testrunvalgrind
buildtest:
@cd $(TEST) && make TEST=TRUE -f ../Makefile test
test : $(OBJSTEST)
$(CC) -o test $(OBJSTEST) $(LDFLAGSTEST)
testrun: test
LD_LIBRARY_PATH=$(shell pwd)/../external/libs/lib LISTEN_PORT=9090 LISTEN_FAMILY=IPV4_IPV6 ./test
testrunvalgrind: test
LD_LIBRARY_PATH=$(shell pwd)/../external/libs/lib LISTEN_PORT=9090 LISTEN_FAMILY=IPV4_IPV6 valgrind -v --track-origins=yes --leak-check=full --show-leak-kinds=all --gen-suppressions=all --suppressions=$(shell pwd)/valgrind.options ./test
clean:
find ./$(SRC) -name "*.o" -type f -delete
find ./$(TEST) -name "*.o" -type f -delete
rm -rf $(SRC)/rebrick
rm -rf $(TEST)/test
rm -rf output
rm -rf out
<file_sep>#include "./common/rebrick_metrics.h"
#include "cmocka.h"
static int setup(void**state){
unused(state);
fprintf(stdout,"**** %s ****\n",__FILE__);
return 0;
}
static int teardown(void **state){
unused(state);
return 0;
}
static void metrics_object_create_destroy_success(void **start){
unused(start);
rebrick_metrics_t *metrics=NULL;
int32_t result;
result=rebrick_metrics_new(&metrics);
assert_true(result>=0);
assert_non_null(metrics);
assert_string_equal(metrics->type_name,"rebrick_metrics_t");
rebrick_metrics_destroy(metrics);
}
static void metrics_tostring(void **start){
unused(start);
rebrick_metrics_t *metrics=NULL;
int32_t result;
result=rebrick_metrics_new(&metrics);
assert_int_equal(result,REBRICK_SUCCESS);
metrics->start_time=1;
metrics->current_time=30;
metrics->received_total=2;
metrics->received_error_total=3;
metrics->received_success_total=4;
metrics->forward_total=5;
metrics->forward_error_total=6;
metrics->forward_success_total=7;
char buffer[REBRICK_METRICS_MAX_STR_LEN];
rebrick_metrics_tostring(metrics,buffer);
const char *mustbuffer="start_time:1\n\
current_time:30\n\
received_total:2\n\
received_error_total:3\n\
received_success_total:4\n\
forward_total:5\n\
forward_error_total:6\n\
forward_success_total:7\n";
assert_string_equal(buffer,mustbuffer);
rebrick_metrics_destroy(metrics);
}
int test_rebrick_metrics(void) {
const struct CMUnitTest tests[] = {
cmocka_unit_test(metrics_object_create_destroy_success),
cmocka_unit_test(metrics_tostring)
};
return cmocka_run_group_tests(tests, setup, teardown);
}
<file_sep>#ifndef __REBRICK_CONTEXT_H__
#define __REBRICK_CONTEXT_H__
#include "rebrick_common.h"
#include "rebrick_config.h"
#include "rebrick_metrics.h"
#include "rebrick_log.h"
/**
 * @brief context object that holds the system configuration and all static data
*
*/
public_ typedef struct rebrick_context
{
base_object();
public_ readonly_ rebrick_config_t *config;
public_ readonly_ rebrick_metrics_t *metrics;
}rebrick_context_t;
/**
* @brief Create a roksit default context object
*
 * @return int32_t, non-zero on error
*/
int32_t rebrick_context_new(rebrick_context_t **context,rebrick_config_t *config, rebrick_metrics_t *metrics);
void rebrick_context_destroy(rebrick_context_t *context);
#endif //
<file_sep>#ifndef __REBRICK_COMMON_H__
#define __REBRICK_COMMON_H__
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <ctype.h>
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <inttypes.h>
#include "uv.h"
#include <openssl/err.h>
#include <openssl/ssl.h>
#include <openssl/conf.h>
#include <openssl/engine.h>
#define TRUE 1
#define FALSE 0
/**
* @brief errors and success, errors < 0
*/
#define REBRICK_SUCCESS 0
#define REBRICK_ERR_UV -10000
#define REBRICK_ERR_MALLOC -1 //0xFFFFFFFF
#define REBRICK_ERR_CONFIG_CREATE -2 //0xFFFFFFFE
#define REBRICK_ERR_BAD_ARGUMENT -3
#define REBRICK_ERR_SPRINTF -4
#define REBRICK_ERR_BAD_CONFIG_ARGUMENT -5
#define REBRICK_ERR_BAD_IP_PORT_ARGUMENT -6
#define REBRICK_ERR_ASSERT_NOT_NULL -7
#define REBRICK_ERR_MORE_BUFFER -8
#define REBRICK_ERR_CLIENT_CREATE -9
#define REBRICK_ERR_IO_CLOSED -10
#define REBRICK_ERR_IO_CLOSING -11
#define REBRICK_ERR_IO_ERR -12
#define REBRICK_ERR_LEN_NOT_ENOUGH -13
#define REBRICK_ERR_TLS_INIT -20
#define REBRICK_ERR_TLS_NEW -21
#define REBRICK_ERR_TLS_ERR -22
#define REBRICK_ERR_TLS_INIT_NOT_FINISHED -23
#define REBRICK_ERR_TLS_READ -24
#define REBRICK_ERR_TLS_WRITE -25
#define REBRICK_ERR_TLS_CLOSED -26
#define REBRICK_ERR_NOT_FOUND -50
#define REBRICK_ERR_HTTP_HEADER_PARSE -100
#define HAS_UV_ERR(result) ((result)<REBRICK_ERR_UV)
#define UV_ERR(result) (result)-(REBRICK_ERR_UV)
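/**
 * @brief example of how these two macros compose (illustrative): libuv return
 * codes are folded into the rebrick error space by adding REBRICK_ERR_UV, and
 * decoded again before calling uv_strerror (mirrors the existing call sites):
 *
 *   int32_t result = REBRICK_ERR_UV + uv_rc; //uv_rc is a negative libuv code
 *   if (HAS_UV_ERR(result))
 *       rebrick_log_fatal("uv error: %s\n", uv_strerror(UV_ERR(result)));
 */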
/*
* @brief every struct has a type name, sometimes we are using for detect memory leak
*/
#define REBRICK_STRUCT_NAME_LEN 32
/*
* @brief ip max string len
*/
#define REBRICK_IP_STR_LEN 64
#define REBRICK_PORT_STR_LEN 8
#define REBRICK_TLS_KEY_LEN 128
#define REBRICK_CA_VERIFY_PATH_MAX_LEN 1024
/* @brief allocation methods */
#define new(x) malloc(sizeof(x))
#define constructor(x,y) \
if(!x) { \
rebrick_log_fatal("malloc problem\n");\
exit(1);\
} \
fill_zero(x,sizeof(y));\
strcpy(x->type_name,#y);
#define new_array(x, len) malloc(sizeof(x) * (len))
#define fill_zero(x, size) memset((x), 0, (size))
#define cast(x, y) ((y)x)
#define unused(x) (void)(x)
#define if_is_null_then_die(x,y) if(!x){ \
rebrick_log_fatal(y);\
exit(1);\
}
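/**
 * @brief typical allocation pattern with the macros above (illustrative
 * sketch): new() mallocs the struct, constructor() zero-fills it and stamps
 * type_name so later typeof() checks work:
 *
 *   rebrick_metrics_t *metrics = new(rebrick_metrics_t);
 *   constructor(metrics, rebrick_metrics_t);
 *   //... use metrics, then free() it (or call its destroy function)
 */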
/**
* @brief base class for every structs
*
*/
#define base_object(x) public_ readonly_ char type_name[REBRICK_STRUCT_NAME_LEN]
#define typeof(x,y) !strcmp((x)->type_name,#y)
#define ssizeof(x) cast(sizeof(x),int32_t)
#define cast_to_uint8ptr(x) cast(x,uint8_t*)
#define cast_to_const_uint8ptr(x) cast(x, const uint8_t*)
#define cast_to_charptr(x) cast(x,char *)
#define cast_to_const_charptr(x) cast(x,const char*)
#define public_
#define private_
#define readonly_
#define protected_
#define internal_
/**
* @brief socket address union
*
*/
typedef union rebrick_sockaddr {
struct sockaddr base;
struct sockaddr_in v4;
struct sockaddr_in6 v6;
}rebrick_sockaddr_t;
#endif
| 4ff85c8feb8dad513bdee65f24edf16df9e517b6 | [
"Makefile",
"C"
] | 24 | Makefile | zhangjinde/rebrick.io | e030b81e1dc79db8bb473b3afb665af03c92cd7b | b1badba16329fc3ed234db68569983ffc6321b13 |
refs/heads/master | <file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
pass
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((20, 20))
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (-1, -1)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return 'NOPE'
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(10)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        num_obs = -1
        return np.float64(num_obs)
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from typing import List, Tuple
start = "ATG"
stop1 = "TAA"
stop2 = "TAG"
stop3 = "TGA"
codon_dict = {
"TTT": "F", "TTC": "F", "TTA": "L", "TTG": "L",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S",
"TAT": "Y", "TAC": "Y", "TAA": "STOP", "TAG": "STOP",
"TGT": "C", "TGC": "C", "TGA": "STOP", "TGG": "W",
"CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAT": "H", "CAC": "H", "CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
"AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
}
def complementary(orf: str) -> str:
"""
Parses characters of an input string sequence.
:param input_string: sequence as string
:return: new_string: parsed string sequence with exchanged characters
"""
reversed = ''
for char in orf:
if char == "A":
reversed += "T"
elif char == "T":
reversed += "A"
elif char == "G":
reversed += "C"
elif char == "C":
reversed += "G"
else:
reversed += char
return reversed
def get_orfs_in_frame(frame: str, circular_overflow: str, i: int, result) -> List[Tuple]:
"""
collects all valid orfs in a frame
:param frame: current reading frame
:param i: iteration, entry in the list of frames,
0-2 = regular with offsets, 3-5= compl. reverse with offsets
:return: list with all found orfs in the given frame
"""
found_orfs = []
found_stops = []
start_indizes = [i for i in range(0, len(frame), 3) if frame[i:i + 3] == start]
for start_idx in start_indizes:
stop_indizes = []
len_base_sequence = len(frame[start_idx:])
circular_sequence = frame[start_idx:] + circular_overflow + frame[:start_idx]
# check circular sequence for stop codons
stop_indizes.extend(
[i for i in range(0, len(circular_sequence), 3) if
circular_sequence[i:i + 3] == stop1])
stop_indizes.extend(
[i for i in range(0, len(circular_sequence), 3) if
circular_sequence[i:i + 3] == stop2])
stop_indizes.extend(
[i for i in range(0, len(circular_sequence), 3) if
circular_sequence[i:i + 3] == stop3])
if stop_indizes:
# get first stop codon
idx_stop = min(stop_indizes)
orf = circular_sequence[:idx_stop]
# default for later check if stop index is in circular area
is_circular = False
if len(orf) > 99:
# if long enough, translate to protein
aa = codon_to_aa(orf)
# calculate stop index in primary strand
if idx_stop > len_base_sequence:
is_circular = True
idx_stop -= len_base_sequence
else:
idx_stop += start_idx
# parse indizes to get index of primary strand
is_reverse = i > 2
if is_reverse:
# reverse indizes to get index of primary strand
start_idx = len(frame) -1 - start_idx + len(circular_overflow)
idx_stop = len(frame) -1 - idx_stop + len(circular_overflow)
# stop index should be the last nt of stop codon
idx_stop -= 2
# subtract offset
start_idx -= i - 3
if not is_circular:
# substract offset
idx_stop -= i - 3
else:
# add offset
start_idx += i
if not is_circular:
idx_stop += i
# stop index should be the last nt of stop codon
idx_stop += 2
                # make sure only the longest orf for a given stop index is kept
if idx_stop in found_stops:
continue
else:
stopsInResult = [orf[1] for orf in result]
try:
duplicate_idx = stopsInResult.index(idx_stop)
if len(aa) > len(result[duplicate_idx][2]):
del result[duplicate_idx]
else:
continue
                    except ValueError:
                        # no orf with this stop index collected yet
                        pass
found_stops.append(idx_stop)
found_orfs.append((start_idx, idx_stop, aa, is_reverse))
return found_orfs
def get_orfs(frame: str) -> List[Tuple]:
"""
Finds all proteins of a DNA sequence.
:param frame: DNA sequence
:return: List of polypeptides with indizes in the genome.
"""
result = []
# error handling of input
if not all(char in ["G", "T", "C", "A"] for char in frame):
raise TypeError("not a DNA sequence")
# get different offsets of genome with shifts from 0 to 2
reading_frames, circular_frames = get_offsets(frame)
# get complementary genome and reverse
comp_genome = complementary(frame)[::-1]
# get different offsets of reversed complementary genome with shifts from 0 to 2
comp_list, comp_circular_list = get_offsets(comp_genome)
reading_frames.extend(comp_list)
# save overflow in circular frame
circular_frames.extend(comp_circular_list)
for i, (frame, circular_frame) in enumerate(zip(reading_frames, circular_frames)):
# get all orfs for this frame
aa = get_orfs_in_frame(frame, circular_frame, i, result)
if aa:
result.extend(aa)
return result
def get_offsets(genome) -> List:
"""
Generates genomes with offset 0, 1 and 2 and cuts the end s.t. %3 = 0
:param genome: sequence string
:return: list of sequences
"""
offset_list = [genome, genome[1:], genome[2:]]
circular_list = ['', genome[0:1], genome[0:2]]
return offset_list, circular_list
def codon_to_aa(orf: str) -> str:
"""
Translates an orf string into an amino acid
:param orf: genome sequence
:return: amino acid sequence
"""
codons = get_codons(orf)
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
def get_codons(orf: str) -> List:
"""
Parses an orf string into a list of codons by splitting string in pairs of three.
:param orf: genome sequence
:return: list of codons
"""
    if len(orf) % 3 != 0:
return None
return [orf[i:i + 3] for i in range(0, len(orf), 3)]
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
        self.structure = self.CIF_PARSER.get_structure('PHA-L', path)  # Parse the structure once and re-use it in the functions below
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
# chains = self.structure.get_chains()
from Bio.PDB import Selection
chain_list = Selection.unfold_entities(self.structure, 'C')
n_chains = len(chain_list)
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
from Bio.PDB.Polypeptide import PPBuilder
ppb = PPBuilder()
polypeptide = ppb.build_peptides(self.structure[0][chain_id])
seq = str(polypeptide[0].get_sequence())
return seq
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
chain = self.structure[0][chain_id]
        wm = []
        for residue in chain.get_list():
            residue_id = residue.get_id()
            hetfield = residue_id[0]
            if hetfield == "W":
                wm.append(residue)
        n_waters = len(set(wm))
return n_waters
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
#
# residule1 = self.structure[0][chain_id_1][index_1]["CA"]
# residule2 = self.structure[0][chain_id_2][index_2]["CA"]
# ca_distance = abs(residule1-residule2)
# print(chain_id_1,index_1,chain_id_1,index_2)
residule1 = self.structure[0][chain_id_1][index_1]
al1 = [atom for atom in residule1.get_atoms() if atom.name =='CA']
residule2 = self.structure[0][chain_id_2][index_2]
al2 = [atom for atom in residule2.get_atoms() if atom.name =='CA']
if len(al1)>0 and len(al2)>0:
diff = al1[0].coord-al2[0].coord
ca_distance = np.sqrt(np.sum(diff * diff))
else:ca_distance = np.nan
# print(ca_distance)
return int(ca_distance)
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
chain = self.structure[0][chain_id]
res_list = []
for i,residue in enumerate(chain.get_list()):
residule_id = residue.get_id()
al = [atom for atom in residue.get_atoms() if atom.name == 'CA']
if len(al)>0:
res_list.append((residule_id,residue))
length = len(res_list)
contact_map = np.zeros( (length,length), dtype=np.float32 )
for i in range(length):
for j in range(length):
k1,res1 = res_list[i]
k2,res2 = res_list[j]
contact_map[i,j]=self.get_ca_distance(chain_id,k1,chain_id,k2)
# contact_map[i][j] = abs(chain[i]-chain[j])
return contact_map.astype( np.int ) # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
        chain = self.structure[0][chain_id]
        res_list = []
        for residue in chain.get_list():
            al = [atom for atom in residue.get_atoms() if atom.name == 'CA']
            if len(al) > 0:
                res_list.append(residue)
        length = len(res_list)
        b_factors = np.zeros((length,), dtype=np.float32)
        for i, res in enumerate(res_list):
            # mean B-factor over all atoms of the residue; nan if unavailable
            ab = np.asarray([atom.get_bfactor() for atom in res.get_atoms()])
            b_factors[i] = np.mean(ab) if ab.size > 0 else np.nan
        # standard scores (zero mean, unit variance), nan-aware as the
        # docstring requires
        mean = np.nanmean(b_factors)
        sigma = np.nanstd(b_factors)
        b_factors = (b_factors - mean) / sigma
        return b_factors.astype(int)  # return rounded (integer) values
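    # Standard-score sketch (toy values, not from any structure): for raw
    # residue means [10., nan, 30.], (vals - np.nanmean(vals)) / np.nanstd(vals)
    # yields [-1., nan, 1.]; the nan-aware functions simply skip missing residues.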
def main():
    print('PDB parser class.')
    # example structure from the repository; adjust the path if necessary
    pdbp = PDB_Parser('tests/7ahl.cif')
    b_factors = pdbp.get_bfactors('A')
    print(b_factors)
    return None
if __name__ == '__main__':
main()<file_sep>import numpy as np
MATRICES = {
'blosum': {
'A': {'A': 4, 'C': 0, 'B': -2, 'E': -1, 'D': -2, 'G': 0, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 0, 'W': -3, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'C': {'A': 0, 'C': 9, 'B': -3, 'E': -4, 'D': -3, 'G': -3, 'F': -2, 'I': -1, 'H': -3, 'K': -3, 'M': -1, 'L': -1, 'N': -3, 'Q': -3, 'P': -3, 'S': -1, 'R': -3, 'T': -1, 'W': -2, 'V': -1, 'Y': -2, 'X': -2, 'Z': -3},
'B': {'A': -2, 'C': -3, 'B': 4, 'E': 1, 'D': 4, 'G': -1, 'F': -3, 'I': -3, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 3, 'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'E': {'A': -1, 'C': -4, 'B': 1, 'E': 5, 'D': 2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0, 'Q': 2, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4},
'D': {'A': -2, 'C': -3, 'B': 4, 'E': 2, 'D': 6, 'G': -1, 'F': -3, 'I': -3, 'H': -1, 'K': -1, 'M': -3, 'L': -4, 'N': 1, 'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'G': {'A': 0, 'C': -3, 'B': -1, 'E': -2, 'D': -1, 'G': 6, 'F': -3, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0, 'Q': -2, 'P': -2, 'S': 0, 'R': -2, 'T': -2, 'W': -2, 'V': -3, 'Y': -3, 'X': -1, 'Z': -2},
'F': {'A': -2, 'C': -2, 'B': -3, 'E': -3, 'D': -3, 'G': -3, 'F': 6, 'I': 0, 'H': -1, 'K': -3, 'M': 0, 'L': 0, 'N': -3, 'Q': -3, 'P': -4, 'S': -2, 'R': -3, 'T': -2, 'W': 1, 'V': -1, 'Y': 3, 'X': -1, 'Z': -3},
'I': {'A': -1, 'C': -1, 'B': -3, 'E': -3, 'D': -3, 'G': -4, 'F': 0, 'I': 4, 'H': -3, 'K': -3, 'M': 1, 'L': 2, 'N': -3, 'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': -1, 'W': -3, 'V': 3, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2, 'F': -1, 'I': -3, 'H': 8, 'K': -1, 'M': -2, 'L': -3, 'N': 1, 'Q': 0, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -2, 'V': -3, 'Y': 2, 'X': -1, 'Z': 0},
'K': {'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2, 'F': -3, 'I': -3, 'H': -1, 'K': 5, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -1, 'S': 0, 'R': 2, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 1},
'M': {'A': -1, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 0, 'I': 1, 'H': -2, 'K': -1, 'M': 5, 'L': 2, 'N': -2, 'Q': 0, 'P': -2, 'S': -1, 'R': -1, 'T': -1, 'W': -1, 'V': 1, 'Y': -1, 'X': -1, 'Z': -1},
'L': {'A': -1, 'C': -1, 'B': -4, 'E': -3, 'D': -4, 'G': -4, 'F': 0, 'I': 2, 'H': -3, 'K': -2, 'M': 2, 'L': 4, 'N': -3, 'Q': -2, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'A': -2, 'C': -3, 'B': 3, 'E': 0, 'D': 1, 'G': 0, 'F': -3, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -3, 'N': 6, 'Q': 0, 'P': -2, 'S': 1, 'R': 0, 'T': 0, 'W': -4, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'Q': {'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': 0, 'L': -2, 'N': 0, 'Q': 5, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -2, 'V': -2, 'Y': -1, 'X': -1, 'Z': 3},
'P': {'A': -1, 'C': -3, 'B': -2, 'E': -1, 'D': -1, 'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -2, 'L': -3, 'N': -2, 'Q': -1, 'P': 7, 'S': -1, 'R': -2, 'T': -1, 'W': -4, 'V': -2, 'Y': -3, 'X': -2, 'Z': -1},
'S': {'A': 1, 'C': -1, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'F': -2, 'I': -2, 'H': -1, 'K': 0, 'M': -1, 'L': -2, 'N': 1, 'Q': 0, 'P': -1, 'S': 4, 'R': -1, 'T': 1, 'W': -3, 'V': -2, 'Y': -2, 'X': 0, 'Z': 0},
'R': {'A': -1, 'C': -3, 'B': -1, 'E': 0, 'D': -2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 2, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -2, 'S': -1, 'R': 5, 'T': -1, 'W': -3, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'T': {'A': 0, 'C': -1, 'B': -1, 'E': -1, 'D': -1, 'G': -2, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 5, 'W': -2, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'W': {'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -4, 'G': -2, 'F': 1, 'I': -3, 'H': -2, 'K': -3, 'M': -1, 'L': -2, 'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 11, 'V': -3, 'Y': 2, 'X': -2, 'Z': -3},
'V': {'A': 0, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': -1, 'I': 3, 'H': -3, 'K': -2, 'M': 1, 'L': 1, 'N': -3, 'Q': -2, 'P': -2, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -2},
'Y': {'A': -2, 'C': -2, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 3, 'I': -1, 'H': 2, 'K': -2, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -2, 'T': -2, 'W': 2, 'V': -1, 'Y': 7, 'X': -1, 'Z': -2},
'X': {'A': 0, 'C': -2, 'B': -1, 'E': -1, 'D': -1, 'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1, 'L': -1, 'N': -1, 'Q': -1, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -2, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'A': -1, 'C': -3, 'B': 1, 'E': 4, 'D': 1, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0, 'Q': 3, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4}
},
'identity': {
'A': {'A': 1, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'R': {'A': 0, 'R': 1, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'N': {'A': 0, 'R': 0, 'N': 1, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'D': {'A': 0, 'R': 0, 'N': 0, 'D': 1, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'C': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 1, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'E': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 1, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'Q': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 1, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'G': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 1, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'H': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 1, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'I': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 1, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'L': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 1, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'K': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 1, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'M': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 1, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'F': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 1, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'P': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 1, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'S': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 1, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'T': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 1, 'W': 0, 'Y': 0, 'V': 0},
'W': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 1, 'Y': 0, 'V': 0},
'Y': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 1, 'V': 0},
'V': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 1}
}
}
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
        self.alignments = self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(len(self.string2)+1):
for j in range(len(self.string1)+1):
if i == 0 and j == 0:
self.score_matrix[i, j] = 0
else:
self.score_matrix[i, j] = self.calc_max(i, j)
i, j = np.unravel_index(np.argmax(self.score_matrix, axis=None), self.score_matrix.shape)
self.high_point = [i,j]
alignments = self.calc_alignments(i, j, [('', '')])
return alignments
def get_prevs(self, i, j):
prev = []
if i > 0 and j > 0:
if self.score_matrix[i-1, j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]] == self.score_matrix[i,j]:
prev.append((-1, -1))
if j > 0 and self.score_matrix[i, j - 1] + self.gap_penalty == self.score_matrix[i,j]:
prev.append((0, -1))
if i > 0 and self.score_matrix[i-1, j] + self.gap_penalty == self.score_matrix[i,j]:
prev.append((-1, 0))
return prev
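    # get_prevs encodes traceback directions as (row, col) offsets:
    # (-1, -1) diagonal (match/mismatch), (0, -1) consumes a string1 character
    # against a gap in string2, (-1, 0) consumes a string2 character against a
    # gap in string1.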
def calc_alignments(self, i, j, tpls):
result = []
if self.score_matrix[i,j] == 0:
self.low_point = [i,j]
res = []
for tpl in tpls:
a = list(tpl[0])
a.reverse()
a = ''.join(a)
b = list(tpl[1])
b.reverse()
b = ''.join(b)
res.append((a, b))
result.extend(res)
else:
for num, tpl in enumerate(tpls):
prevs = self.get_prevs(i, j)
for prev in prevs:
if prev == (-1, -1):
result.extend(self.calc_alignments(i-1, j-1, [(tpl[0]+ self.string1[j-1], tpl[1]+self.string2[i-1])]))
if prev == (-1, 0):
result.extend(self.calc_alignments(i-1, j, [(tpl[0]+'-', tpl[1]+self.string2[i-1])]))
if prev == (0, -1):
result.extend(self.calc_alignments(i, j-1, [(tpl[0]+self.string1[j-1], tpl[1]+'-')]))
return result
def calc_max(self, i , j):
results = [0]
if i > 0 and j > 0:
res = self.score_matrix[i-1, j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]
results.append(res)
else:
results.append(-np.inf)
if j > 0:
res = self.score_matrix[i, j-1] + self.gap_penalty
results.append(res)
else:
results.append(-np.inf)
if i > 0:
res = self.score_matrix[i-1, j] + self.gap_penalty
results.append(res)
else:
results.append(-np.inf)
return max(results)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return len(self.alignments[0][0]) > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignments[0]
    def is_residue_aligned(self, string_number, residue_index):
        """
        :param string_number: number of the string (1 for string1, 2 for string2) to check
        :param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
                 False otherwise
        """
        # low_point is the traceback's zero cell, high_point the score maximum;
        # a residue is aligned iff its index falls inside that region
        if string_number == 1:
            return self.low_point[1] <= residue_index < self.high_point[1]
        return self.low_point[0] <= residue_index < self.high_point[0]
if __name__ == '__main__':
test = LocalAlignment("ARNDCEQGHI", "DDCEQHG", -6, MATRICES['blosum'])<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
# for each cell store from which other cells it was reached (can be multiple)
self.path_matrix = [[[] for _ in range(len(string1) + 1)] for _ in range(len(string2) + 1)]
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
subst = self.substitution_matrix
scores = self.score_matrix
path = self.path_matrix
gap = self.gap_penalty
# fill first row and column with 0, -1, -2, ...
for i in range(len(self.string1) + 1):
scores[0][i] = i * gap
for i in range(len(self.string2) + 1):
scores[i][0] = i * gap
# fill other cells, indices are on strings (add 1 for scores)
for s1 in range(len(self.string1)):
for s2 in range(len(self.string2)):
s1_char = self.string1[s1]
s2_char = self.string2[s2]
# compute scores
diag = scores[s2][s1] + subst[s1_char][s2_char]
vert = scores[s2+1][s1] + gap
horz = scores[s2][s1+1] + gap
# update best score
score = max(diag, vert, horz)
scores[s2+1][s1+1] = score
# update path: save from which cells this one can be reached
coming_from = []
if diag == score:
coming_from.append((s2, s1))
if vert == score:
coming_from.append((s2+1, s1))
if horz == score:
coming_from.append((s2, s1+1))
path[s2+1][s1+1] = coming_from
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return self.get_number_of_alignments_rec(len(self.string2), len(self.string1))
def get_number_of_alignments_rec(self, f2, f1):
"""
Starting from field (s2, s1), recursively get the number of alignments
"""
# recursion stop
if f2 == 0 or f1 == 0:
return 1
# sum num paths of all fields that lead to this one
num_paths = 0
coming_from = self.path_matrix[f2][f1]
for f2_prev, f1_prev in coming_from:
num_paths += self.get_number_of_alignments_rec(f2_prev, f1_prev)
return num_paths
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.get_alignments_rec(len(self.string2), len(self.string1))
def get_alignments_rec(self, f2, f1):
"""
        Starting from field (s2, s1), recursively get list of alignments
"""
# edge cases: top row / left column
if f1 == 0 and f2 == 0:
return [('', '')]
if f2 == 0: # move back horizontally (top row)
s1_char = self.string1[f1-1]
a1, a2 = self.get_alignments_rec(f2, f1-1)[0]
return [(a1 + s1_char, a2 + '-')]
if f1 == 0: # move back vertically (left column)
s2_char = self.string2[f2-1]
a1, a2 = self.get_alignments_rec(f2-1, f1)[0]
return [(a1 + '-', a2 + s2_char)]
# somewhere in the middle
coming_from = self.path_matrix[f2][f1]
s1_char = self.string1[f1-1]
s2_char = self.string2[f2-1]
alignments = []
# get alignments from every path leading here
# append characters based on direction
for f2_prev, f1_prev in coming_from:
prev_alignments = self.get_alignments_rec(f2_prev, f1_prev)
if f2_prev + 1 == f2 and f1_prev + 1 == f1:
# coming from diagonal -> append chars to both strings: (X, Y) -> (Xa, Yb)
prev_alignments = list(map(lambda al: (al[0] + s1_char, al[1] + s2_char), prev_alignments))
elif f2_prev == f2 and f1_prev + 1 == f1:
# coming from horizontal -> append char only to string 1: (X, Y) -> (Xa, Y-)
prev_alignments = list(map(lambda al: (al[0] + s1_char, al[1] + '-'), prev_alignments))
else:
# coming from vertical -> append char only to string 2: (X, Y) -> (X-, Yb)
prev_alignments = list(map(lambda al: (al[0] + '-', al[1] + s2_char), prev_alignments))
alignments.extend(prev_alignments) # add previous extended alignments
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
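# A minimal usage sketch (the inline score dict is a toy stand-in, not the
# course's substitution matrices):
if __name__ == '__main__':
    _scores = {a: {b: (1 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    ga = GlobalAlignment('ACGT', 'AGT', -2, _scores)
    print(ga.get_best_score())   # 1
    print(ga.get_alignments())   # [('ACGT', 'A-GT')]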
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# Fill first row and column with cumulative gap penalty
rows = len(self.string2) + 1
columns = len(self.string1) + 1
for i in range(1, rows):
self.score_matrix[i][0] = self.score_matrix[i - 1][0] + self.gap_penalty
for i in range(1, columns):
self.score_matrix[0][i] = self.score_matrix[0][i - 1] + self.gap_penalty
# Fill consecutive cells
for m in range(1, rows):
for n in range(1, columns):
sub = max(self.score_matrix[m - 1][n] + self.gap_penalty,
self.score_matrix[m][n - 1] + self.gap_penalty)
sub = max(sub, self.score_matrix[m - 1][n - 1] +
self.substitution_matrix[self.string1[n - 1]][self.string2[m - 1]])
self.score_matrix[m][n] = sub
return
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
# Build assignment tree
x = len(self.string2)
y = len(self.string1)
root = AlignmentTreeNode(self.score_matrix[x][y], x, y, self.string1, self.string2, self.score_matrix,
self.substitution_matrix, self.gap_penalty)
root.find_children()
        ret = root.get_alignments()
        return self.translate_tuples(ret)
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix.tolist()
def translate_tuples(self, tuples):
# Split list after (1, 1) elements
paths = [[]]
count = 0
        for t in tuples:
            paths[count].append(t)
            # a tuple that reaches row or column 1 closes the current path
            if t[2] == 1 or t[3] == 1:
                count += 1
                paths.append([])
        paths.pop(count)
# Create full alignment from first paths
ret1 = ''
ret2 = ''
for t in paths[0]:
ret1 += t[0]
ret2 += t[1]
paths[0] = (ret1, ret2)
# For the following alignments use first path as basis
if count >= 1:
for i in range(1, len(paths)):
ret1 = ''
ret2 = ''
for t in paths[i]:
ret1 += t[0]
ret2 += t[1]
paths[i] = (ret1, ret2)
# Flip all results
for i in range(0, len(paths)):
paths[i] = (paths[i][0][::-1], paths[i][1][::-1])
for i in range(1, len(paths)):
tmp1 = paths[0][0]
tmp2 = paths[0][1]
str1 = paths[i][0]
str2 = paths[i][1]
if str1[len(str1) - 1] == '-':
tmp1 = str1 + tmp1[len(str1) - 1] + tmp1[len(str1) + 1:]
else:
tmp1 = str1 + tmp1[len(str1):]
if str2[len(str2) - 1] == '-':
tmp2 = str2 + tmp2[len(str2) - 1] + tmp2[len(str2) + 1:]
else:
tmp2 = str2 + tmp2[len(str2):]
paths[i] = (tmp1, tmp2)
        return paths
class AlignmentTreeNode:
left_child = None
upper_child = None
diagonal_child = None
def __init__(self, value, x, y, string1, string2, score_matrix, subs_matrix, gap_penalty):
self.value = value
self.x = x
self.y = y
self.string1 = string1
self.string2 = string2
self.score_matrix = score_matrix
self.subs_matrix = subs_matrix
self.gap_penalty = gap_penalty
def find_children(self):
try:
self.left_child = AlignmentTreeNode(self.score_matrix[self.x][self.y - 1], self.x, self.y - 1,
self.string1, self.string2, self.score_matrix,
self.subs_matrix, self.gap_penalty)
except IndexError:
self.left_child = None
try:
self.upper_child = AlignmentTreeNode(self.score_matrix[self.x - 1][self.y], self.x - 1, self.y,
self.string1, self.string2, self.score_matrix,
self.subs_matrix, self.gap_penalty)
except IndexError:
self.upper_child = None
try:
self.diagonal_child = AlignmentTreeNode(self.score_matrix[self.x - 1][self.y - 1], self.x - 1, self.y - 1,
self.string1, self.string2, self.score_matrix,
self.subs_matrix, self.gap_penalty)
except IndexError:
self.diagonal_child = None
def get_left_child(self):
return self.left_child
def get_upper_child(self):
return self.upper_child
def get_diagonal_child(self):
return self.diagonal_child
def get_value(self):
return self.value
def get_string_match(self):
return self.subs_matrix[self.string1[self.y - 1]][self.string2[self.x - 1]]
    def get_alignments(self):
        # recursion stop: outside the matrix there is nothing left to align
        if self.x <= 0 or self.y <= 0:
            return []
        ret = [(self.string1[self.y - 1], self.string2[self.x - 1], self.y, self.x)]
self.find_children()
if self.value == self.get_left_child().get_value() + self.gap_penalty:
ret[0] = (ret[0][0], '-', self.y, self.x)
ret += self.get_left_child().get_alignments()
if self.value == self.get_diagonal_child().get_value() \
+ self.get_string_match():
ret += self.get_diagonal_child().get_alignments()
if self.value == self.get_upper_child().get_value() + self.gap_penalty:
ret[0] = ('-', ret[0][1], self.y, self.x)
ret += self.get_upper_child().get_alignments()
return ret
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def find_alignments(self, pos1, pos2):
temp = self.score_matrix[pos1, pos2]
if temp == 0:
return False
self.alignments.append((pos1, pos2))
if temp == (self.score_matrix[pos1 - 1, pos2] + self.gap_penalty):
self.find_alignments(pos1 - 1, pos2)
return True
if temp == (self.score_matrix[pos1, pos2 - 1] + self.gap_penalty):
self.find_alignments(pos1, pos2 - 1)
return True
if temp == (self.score_matrix[pos1 - 1, pos2 - 1] + self.substitution_matrix[self.string2[pos1 - 1]][self.string1[pos2 - 1]]):
self.find_alignments(pos1 - 1, pos2 - 1)
return True
return True
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
# Initialize the first column and the first row
for i in range(1, len(self.string2) + 1):
self.score_matrix[i][0] = 0
for i in range(1, len(self.string1) + 1):
self.score_matrix[0][i] = 0
# Compute the rest of the score matrix
for i in range(1, len(self.string2) + 1):
for j in range(1, len(self.string1) + 1):
temp = []
temp.append(0)
temp.append(self.score_matrix[i][j - 1] + self.gap_penalty)
temp.append(self.score_matrix[i -1][j] + self.gap_penalty)
temp.append(self.score_matrix[i - 1][j - 1] + self.substitution_matrix[self.string2[i - 1]][self.string1[j - 1]])
self.score_matrix[i][j] = np.max(temp)
# Compute the optimal alignment
self.alignments = []
i,j = np.unravel_index(self.score_matrix.argmax(), self.score_matrix.shape)
self.find_alignments(i, j)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
        return len(self.alignments) > 0
    def get_alignment(self):
        """
        :return: alignment represented as a tuple of aligned strings
        """
        # note: the traceback stores matrix positions only, so the
        # reconstruction below assumes a gap-free (diagonal) traceback path
        res1 = ""
        res2 = ""
        length = len(self.alignments)
        for i in range(1, length + 1):
            res1 += self.string1[self.alignments[length - i][1] - 1]
            res2 += self.string2[self.alignments[length - i][0] - 1]
        return (res1, res2)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
index = 1
elif string_number == 2:
index = 0
res = False
for pos in self.alignments:
if residue_index == (pos[index] - 1):
res = True
return res<file_sep>##############
# 1.4 Genetic Code
##############
complementary_dict = {
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
}
codon_dict = {
'GCT': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'TTA': 'L',
'TTG': 'L',
'CTT': 'L',
'CTC': 'L',
'CTA': 'L',
'CTG': 'L',
'CGT': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'AGA': 'R',
'AGG': 'R',
'AAA': 'K',
'AAG': 'K',
'AAT': 'N',
'AAC': 'N',
'ATG': 'M',
'GAT': 'D',
'GAC': 'D',
'TTT': 'F',
'TTC': 'F',
'TGT': 'C',
'TGC': 'C',
'CCT': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'CAA': 'Q',
'CAG': 'Q',
'TCT': 'S',
'TCC': 'S',
'TCA': 'S',
'TCG': 'S',
'AGT': 'S',
'AGC': 'S',
'GAA': 'E',
'GAG': 'E',
'ACT': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'GGT': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G',
'TGG': 'W',
'CAT': 'H',
'CAC': 'H',
'TAT': 'Y',
'TAC': 'Y',
'ATT': 'I',
'ATC': 'I',
'ATA': 'I',
'GTT': 'V',
'GTC': 'V',
'GTA': 'V',
'GTG': 'V',
# stop codons
'TAA': '',
'TGA': '',
'TAG': ''
}
def codons_to_aa(orf):
""" Translate an ORF (sequence of codon triplets) to a sequence of amino acid. """
if len(orf) % 3 is not 0:
print('len(orf) % 3 is not 0')
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
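# e.g. codons_to_aa('ATGGCTTAA') -> 'MA' (the stop codon maps to the empty string)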
def complementary_strand(strand):
""" Return the complimentary strand """
return ''.join(complementary_dict[s] for s in strand)
def rev_complementary_strand(strand):
""" Return the reversed complimentary strand """
rev_strand = strand[::-1]
return complementary_strand(rev_strand)
def get_orfs_in_reading_frame(seq, offset, reverse_comp, length):
"""
Loop over the reading frame and find all ORFs
Return a list of found ORFs as tuples: (start, stop, aa_seq, reverse_comp)
"""
    orfs = []
orf_started = False
orf_start_index = -1
for i in range(0, len(seq), 3):
codon = seq[i:i+3]
if codon == 'ATG' and not orf_started:
orf_started = True
orf_start_index = i
continue
if (codon == 'TAA' or codon == 'TAG' or codon == 'TGA') and orf_started:
orf_started = False
orf_stop_index = i+2
orf = seq[orf_start_index:orf_stop_index+1]
aa_seq = codons_to_aa(orf)
if len(aa_seq) < 34:
continue
orfs.append(( index_magic(orf_start_index, offset, reverse_comp, length), index_magic(orf_stop_index, offset, reverse_comp, length), aa_seq, reverse_comp))
return orfs
def index_magic(index, offset, reverse_comp, length):
"""
Maps the index of a modified sequence (shifted by offset, reversed and extended)
to the index in the original sequence.
Parameters:
- index: index in the modified sequence
    - offset: how many characters were dropped from the start of original sequence
- reverse_comp: True if modified sequence was reversed
- length: length of the original sequence
"""
index = (index + offset) % length
if not reverse_comp:
return index
else:
return length - index - 1
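# e.g. index_magic(3, 1, False, 8) -> 4: position 3 of the frame shifted by one
# maps back to position 4 of the original (circular) sequence; with
# reverse_comp=True the index is additionally mirrored: index_magic(0, 0, True, 8) -> 7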
def rm_substrings(orfs):
""" Remove ORFs that are parts of longer ORFs in the list. """
cleaned = []
for orf1 in orfs:
add_orf1 = True
for orf2 in orfs:
if is_part_of(orf1, orf2):
add_orf1 = False
if add_orf1: # orf1 is not part of any other
cleaned.append(orf1)
return cleaned
def rm_equals(orfs):
""" Remove duplicate results from the list of ORFs. """
cleaned = []
for new in orfs:
add_new = True
for existing in cleaned:
if orf_equal(new, existing):
add_new = False
if add_new:
cleaned.append(new)
return cleaned
def orf_equal(orf1, orf2):
"""
Return True if orf1 equals orf2.
    It is sufficient to compare start, stop and the reading direction.
"""
return orf1[0] == orf2[0] and orf1[1] == orf2[1] and orf1[3] == orf2[3]
def is_part_of(orf1, orf2):
"""
Return True if orf1 is part of orf2.
This is the case when both ORFs have the same stop index, the same reading
    direction (primary or reversed strand) and orf1 is shorter than orf2.
"""
return orf1[1] == orf2[1] and orf1[3] == orf2[3] and len(orf1[2]) < len(orf2[2])
def get_orfs(genome):
"""
Find all ORFs encoding proteins longer than 33 in the input sequence.
If multiple ORFs overlap and end with the same stop codon, only the longest will be returned.
The input sequence is assumed to be circular DNA.
Parameters: DNA sequence (invalid sequences will throw a TypeError)
Returns a list of tuples: (start, stop, aa_seq, reverse_comp)
- start: position of the first DNA residue
- stop: position of the last DNA residue (including stop codon)
- aa_seq: translated amino acid sequence as a single string
- reverse_comp: flag which is True if the ORF is parsed from the reversed strand
"""
genome = genome.upper()
for c in genome:
if c != 'A' and c != 'T' and c != 'G' and c != 'C':
raise TypeError("Invalid genome")
genome_len = len(genome)
genome = genome + genome # replicate twice for circular DNA
rc_genome = rev_complementary_strand(genome) # reversed complimentary
    # find ORFs in all reading frames
orfs = get_orfs_in_reading_frame(genome, 0, False, genome_len)
orfs += get_orfs_in_reading_frame(genome[1:], 1, False, genome_len)
orfs += get_orfs_in_reading_frame(genome[2:], 2, False, genome_len)
orfs += get_orfs_in_reading_frame(rc_genome, 0, True, genome_len)
orfs += get_orfs_in_reading_frame(rc_genome[1:], 1, True, genome_len)
orfs += get_orfs_in_reading_frame(rc_genome[2:], 2, True, genome_len)
orfs = rm_equals(orfs)
orfs = rm_substrings(orfs)
return orfs
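# A minimal usage sketch (hypothetical toy genome, not from the exercise data):
if __name__ == '__main__':
    toy = 'ATG' + 'GCT' * 40 + 'TAA'
    for start, stop, aa_seq, rev in get_orfs(toy):
        print(start, stop, len(aa_seq), rev)   # 0 125 41 False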
<file_sep>import numpy as np
import re
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.different_words = []
self.Sequences = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.Sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
seq_with_word = []
        for seq in self.Sequences:
            if word in seq:
                seq_with_word.append(seq)
return seq_with_word
    def getSubString(self, sequence, position):
        # all 3-mers of `sequence` starting at `position`, taken at stride 3
        return [sequence[i:i + 3] for i in range(position, len(sequence) - 2, 3)]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        different_words = set()
        num_of_words_perseq = 0
        for seq in self.Sequences:
            substrings = set(self.getSubString(seq, 0)) | set(self.getSubString(seq, 1)) | set(self.getSubString(seq, 2))
            different_words = different_words.union(substrings)
            num_of_words_perseq += len(substrings)
number_seq_perword = 0
for word in different_words:
            for seq in self.Sequences:
                if word in seq:
                    number_seq_perword += 1
num_of_words = len(different_words)
return (len(self.Sequences), num_of_words, int(round(num_of_words_perseq/len(self.Sequences))), int(round(number_seq_perword/num_of_words)))
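# A minimal usage sketch for BlastDb (toy sequences, not exercise data):
if __name__ == '__main__':
    _db = BlastDb()
    _db.add_sequence('MGPRARPAFL')
    _db.add_sequence('MGARSSPAFL')
    print(_db.get_db_stats())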
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
self.query_indices = dict()
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words_threshold = []
unique_words = []
scores = []
if sequence is not None:
for ch_index in range(len(sequence)-2):
word = sequence[ch_index:ch_index + 3]
if not unique_words.__contains__(word):
unique_words.append(word)
for ch in ALPHABET:
for ch2 in ALPHABET:
for ch3 in ALPHABET:
modified_word = ch + ch2 + ch3
score = self.substitution_matrix[AA_TO_INT[word[0]]][AA_TO_INT[modified_word[0]]] + \
self.substitution_matrix[AA_TO_INT[word[1]]][AA_TO_INT[modified_word[1]]] + \
self.substitution_matrix[AA_TO_INT[word[2]]][AA_TO_INT[modified_word[2]]]
if score >= T:
if modified_word not in words_threshold:
words_threshold.append(modified_word)
self.query_indices[modified_word] = [ch_index]
scores.append(score)
else:
self.query_indices[modified_word] = self.query_indices[modified_word] + [ch_index]
if pssm is not None:
for ch_index in range(len(pssm)-2):
for ch in ALPHABET:
for ch2 in ALPHABET:
for ch3 in ALPHABET:
score = pssm[ch_index][AA_TO_INT[ch]] + pssm[ch_index + 1][AA_TO_INT[ch2]] + pssm[ch_index + 2][AA_TO_INT[ch3]]
if score >= T:
modified_word = ch + ch2 + ch3
if modified_word not in words_threshold:
words_threshold.append(modified_word)
self.query_indices[modified_word] = [ch_index]
scores.append(score)
else:
self.query_indices[modified_word] = self.query_indices[modified_word] + [ch_index]
return words_threshold
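    # sketch: for each 3-mer of the query, every one of the 20^3 candidate
    # words is scored position-wise via the substitution matrix (or PSSM rows),
    # and a candidate is kept once any query position gives it a score >= T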
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query is not None:
words = self.get_words(sequence=query, T=T)
for word in words:
#Get all sequences in DB containing word
sequences_with_word = blast_db.get_sequences(word)
#Initial score of the word before adding
initial_score = self.substitution_matrix[AA_TO_INT[word[0]]][AA_TO_INT[word[0]]] + self.substitution_matrix[AA_TO_INT[word[1]]][AA_TO_INT[word[1]]] + self.substitution_matrix[AA_TO_INT[word[2]]][AA_TO_INT[word[2]]]
for seq in sequences_with_word:
indices_target = []
for i in range(len(seq) - 2):
if seq[i:i+3] == word:
indices_target.append(i)
indices_query = []
for i in range(len(query) - 2):
if query[i:i+3] == word:
indices_query.append(i)
indices_query = indices_query + self.query_indices[word]
for i in range(len(indices_target)):
temp = i
for j in range(len(indices_query)):
# Variables
highscore_bool = False
query_index = indices_query[j]
start_query_index = query_index
end_query_index = query_index + 2
target_index = indices_target[temp]
start_target_index = target_index
end_target_index = target_index + 2
highest_score = initial_score
score = initial_score
#Loop right side of the word till score = highest score - 5
while target_index < len(seq) - 3 and query_index < len(query) - 3:
score += self.substitution_matrix[AA_TO_INT[seq[target_index + 3]]][AA_TO_INT[query[query_index + 3]]]
if score > highest_score:
highscore_bool = True
highest_score = score
end_query_index = query_index + 3
end_target_index = target_index + 3
if score <= (highest_score - X):
break
target_index += 1
query_index += 1
#reset score and iteration indices
score = highest_score
query_index = indices_query[j]
target_index = indices_target[temp]
#loop left side of the word till score = highest score - 5
while target_index > 0 and query_index > 0:
score += self.substitution_matrix[AA_TO_INT[seq[target_index - 1]]][AA_TO_INT[query[query_index - 1]]]
if score > highest_score:
highest_score = score
start_query_index = query_index - 1
start_target_index = target_index - 1
if score <= (highest_score - X):
break
target_index -= 1
query_index -= 1
subseq1 = seq[start_target_index: end_target_index + 1]
subseq2 = query[start_query_index: end_query_index + 1]
length = len(subseq1)
if len(subseq1) == len(subseq2):
score = 0
highest_score = 0
for i in range(len(subseq1)):
score += self.substitution_matrix[AA_TO_INT[subseq1[i]]][AA_TO_INT[subseq2[i]]]
highest_score = score
if highest_score >= S:
if seq not in d:
d[seq] = [(start_query_index, start_target_index, length, highest_score)]
else:
if not d[seq].__contains__((start_query_index, start_target_index, length, highest_score)):
d[seq] = sorted(d[seq] + [(start_query_index, start_target_index, length, highest_score)])
        if pssm is not None:
            # PSSM search is not implemented here; only the word list is built
            words = self.get_words(pssm=pssm, T=T)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # NOTE: two-hit search is a stub; it returns a dummy HSP dictionary
        # in the documented format instead of real results
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
<file_sep>##############
# 1.5 Amino Acid Distribution
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_total_length(self):
total_len = 0
for seq in self.__sequences:
total_len += len(seq)
return total_len
def get_average_length(self):
return self.get_total_length() / self.get_counts()
def read_fasta(self, path):
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.__sequences.append(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip().replace('*', '')
self.__sequences.append(seq)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
abs_freq = Counter()
for seq in self.__sequences:
abs_freq.update(seq)
return abs_freq
def get_av_frequencies(self):
        # return number of occurrences normalized by length
av_freq = self.get_abs_frequencies()
total_len = self.get_total_length()
for key in av_freq:
av_freq[key] /= total_len
return av_freq
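# A minimal usage sketch (writes a tiny hypothetical FASTA to a temp file):
if __name__ == '__main__':
    import os
    import tempfile
    tmp = tempfile.NamedTemporaryFile('w', suffix='.fasta', delete=False)
    tmp.write('>toy\nMKV\n>toy2\nAAK*\n')
    tmp.close()
    dist = AADist(tmp.name)
    print(dist.get_counts(), dist.get_average_length())   # 2 3.0
    print(dist.get_abs_frequencies())
    os.remove(tmp.name)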
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignmentList = ()
self.alignmentNbrsStr1 = []
self.alignmentNbrsStr2 = []
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for row, residue2 in enumerate(self.string2):
for col, residue1 in enumerate(self.string1):
diagonal = self.score_matrix[row][col] + self.substitution_matrix[residue1][residue2]
gapHorizontal = self.score_matrix[row+1][col] + self.gap_penalty
gapVertical = self.score_matrix[row][col+1] + self.gap_penalty
maxScore = max(diagonal, gapHorizontal, gapVertical, 0)
self.score_matrix[row+1][col+1] = maxScore
    def has_alignment(self):
        """
        :return: True if a local alignment has been found, False otherwise
        """
        # any positive cell in the Smith-Waterman matrix means an alignment exists
        return bool(self.score_matrix.any())
    def get_alignment(self):
        """
        :return: alignment represented as a tuple of aligned strings
        """
        if not self.has_alignment():
            self.alignmentList = ("", "")
            return self.alignmentList
        # start the traceback at (the first) cell holding the maximum score
        row, col = np.unravel_index(self.score_matrix.argmax(), self.score_matrix.shape)
        self.get_optimal_alignment(int(row), int(col), ["", ""])
        return self.alignmentList
    def get_optimal_alignment(self, row, col, result):
        if self.score_matrix[row][col] == 0:
            # traceback reached a zero cell: store the (reversed) alignment
            alig1 = result[0][::-1]
            alig2 = result[1][::-1]
            self.alignmentList = (alig1, alig2)
            result[0] = result[0][:-1]
            result[1] = result[1][:-1]
            return
        else:
            current = self.score_matrix[row][col]
            diagonal = self.score_matrix[row-1][col-1]
            vertical = self.score_matrix[row-1][col]
            horizontal = self.score_matrix[row][col-1]
            charString1 = self.string1[col-1]
            charString2 = self.string2[row-1]
            subst = self.substitution_matrix[charString1][charString2]
            # case 1, diagonal: score difference must equal the substitution score
            if diagonal == (current - subst):
                result[0] += charString1
                result[1] += charString2
                self.alignmentNbrsStr1.append((charString1, col-1))
                self.alignmentNbrsStr2.append((charString2, row-1))
                self.get_optimal_alignment(row-1, col-1, result)
            # case 2, vertical: score difference must equal the gap penalty
            if vertical == (current - self.gap_penalty):
                result[0] += ("-")
                result[1] += charString2
                self.alignmentNbrsStr1.append(("-", -1))
                self.alignmentNbrsStr2.append((charString2, row-1))
                self.get_optimal_alignment(row-1, col, result)
            # case 3, horizontal: score difference must equal the gap penalty
            if horizontal == (current - self.gap_penalty):
                result[0] += charString1
                result[1] += ("-")
                self.alignmentNbrsStr1.append((charString1, col-1))
                self.alignmentNbrsStr2.append(("-", -1))
                self.get_optimal_alignment(row, col-1, result)
            result[0] = result[0][:-1]
            result[1] = result[1][:-1]
            return
    def is_residue_aligned(self, string_number, residue_index):
        """
        :param string_number: number of the string (1 for string1, 2 for string2) to check
        :param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
                 False otherwise
        """
        if string_number == 1:
            for residue in self.alignmentNbrsStr1:
                if self.string1[residue_index] == residue[0] and residue_index == residue[1]:
                    return True
            return False
        elif string_number == 2:
            for residue in self.alignmentNbrsStr2:
                if self.string2[residue_index] == residue[0] and residue_index == residue[1]:
                    return True
            return False
        else:
            return False
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        self.__sequences = []
        self.read_fasta(filepath)
# (b) get_counts() which counts the number of read sequences and returns the number as integer
def get_counts(self):
return len(self.__sequences)
    # (c) get_average_length() which calculates the average sequence length and returns the result
    # as float
    def get_average_length(self):
        total_length = sum(len(seq) for seq in self.__sequences)
        return total_length / len(self.__sequences)
# (a) read_fasta (filename) which takes a filename as parameter and reads in a file
# with multipleprotein sequences in FASTA format
def read_fasta(self, path):
with open(path,'r') as f:
seq=''
sequence_started=False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.__sequences.append(seq)
seq=''
sequence_started=False
continue
sequence_started=True
seq+=line.strip().replace('*', '')
self.__sequences.append(seq)
# (d) get_abs_frequencies() which counts the occurrence for every amino acid
# over all proteins
def get_abs_frequencies(self):
# return number of occurences not normalized by length
all_sequences = ''
absolute_frequencies = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
for seq in self.__sequences:
all_sequences += seq
for aa in absolute_frequencies:
absolute_frequencies[aa] += all_sequences.count(aa)
return absolute_frequencies
# (e) get_av_frequencies() which calculates the average amino acid composition over all read
# protein sequences
def get_av_frequencies(self):
# return number of occurences normalized by length
avg_frequencies = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
        absolute_frequencies = self.get_abs_frequencies()
        total_length = sum(len(seq) for seq in self.__sequences)
        for aa in avg_frequencies:
            avg_frequencies[aa] = absolute_frequencies[aa] / total_length
return avg_frequencies <file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio import SeqIO # Tip: This module might be useful for parsing...
############ Exercise 3: SwissProt ##########
class SwissProt_Parser:
PARSER = SeqIO
def __init__( self, path, frmt='uniprot-xml' ):
'''
Initialize every SwissProt_Parser with a path to a XML-formatted UniProt file.
An example file is included in the repository (P09616.xml).
Tip: Store the parsed XML entry in an object variable instead of parsing it
again & again ...
'''
self.sp_anno = self.PARSER.read( path, frmt )
self.frmt = frmt
def get_sp_identifier( self ):
return self.sp_anno.id
def get_sp_sequence_length( self ):
return len( self.sp_anno.seq )
def get_organism( self ):
return self.sp_anno.annotations['organism']
def get_localization( self ):
return self.sp_anno.annotations['comment_subcellularlocation_location']
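    # dbxrefs entries have the form 'PDB:<id>'; split(':')[1] keeps the identifier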
def get_pdb_support( self ):
return [cross_ref.split(':')[1] for cross_ref in self.sp_anno.dbxrefs if 'PDB:' in cross_ref]
def main():
print('SwissProt XML Parser class')
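    # example usage, assuming the repository's P09616.xml is in the working dir:
    # sp = SwissProt_Parser('P09616.xml')
    # print(sp.get_sp_identifier(), sp.get_sp_sequence_length())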
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.6
##############
import os
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return sum(len(sequence) for sequence in self.__sequences) / len(self.__sequences)
def read_fasta(self, path):
def add_sequence(seq):
if seq[-1:] == '*':
seq = seq[:-1]
self.__sequences.append(seq)
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
add_sequence(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
add_sequence(seq)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
frequency_dict = {}
for amino_acid in 'NVSTWKEFLIMCRYPGHQDA':
frequency_dict[amino_acid] = 0
for sequence in self.__sequences:
for amino_acid in sequence:
frequency_dict[amino_acid] += 1
return frequency_dict
def get_av_frequencies(self):
        # return number of occurrences normalized by length
total_amino_acid_count = sum(len(sequence) for sequence in self.__sequences)
return { amino_acid: float(count) / float(total_amino_acid_count) for amino_acid, count in self.get_abs_frequencies().items() }
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB import PPBuilder
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__(self, path):
"""
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
"""
self.structure = self.CIF_PARSER.get_structure(123, path)
# 3.8 Chains
def get_number_of_chains(self):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
"""
return len(self.structure.child_list[0].child_list)
# 3.9 Sequence
def get_sequence(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
"""
        ppb = PPBuilder()
        peptides = ppb.build_peptides(self.structure[0][chain_id])
        # join all polypeptide fragments of the chain into one string
        return ''.join(str(pp.get_sequence()) for pp in peptides)
# 3.10 Water molecules
def get_number_of_water_molecules(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
"""
        n_water = 0
        for residue in self.structure[0][chain_id]:
            if residue.get_resname() == 'HOH':
                n_water += 1
        return n_water
# 3.11 C-Alpha distance
def get_ca_distance(self, chain_id_1, index_1, chain_id_2, index_2):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
"""
return int(self.structure[0][chain_id_1][index_1]['CA'] - self.structure[0][chain_id_2][index_2]['CA'])
    def get_ca_distance_from(self, struct, chain_id_1, index_1, chain_id_2, index_2):
        # Helper: same as get_ca_distance, but for an already selected model
        return int(struct[chain_id_1][index_1]['CA'] - struct[chain_id_2][index_2]['CA'])

    def get_general_distance(self, chain_id_1, index_1, chain_id_2, index_2):
        # Unused stub kept for interface compatibility
        return 0
# 3.12 Contact Map
def get_contact_map(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
"""
        chain = []
        # Filter out water molecules before building the map
        for res in self.structure[0][chain_id]:
            if res.resname != 'HOH':
                chain.append(res)
        length = len(chain)
        contact_map = np.zeros((length, length), dtype=np.float32)
        counter = 0
        for a in chain:
            inner_counter = 0
            for b in chain:
                try:
                    contact_map[counter][inner_counter] = a['CA'] - b['CA']
                except KeyError:
                    # Residue without a C-alpha atom (e.g. a hetero residue)
                    print("Missing CA at a:" + str(a) + ", b:" + str(b))
                inner_counter += 1
            counter += 1
        return contact_map.astype(np.int64)  # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
"""
chain = self.structure[0][chain_id]
chain_clean = []
for res in chain:
if res.resname != 'HOH':
chain_clean.append(res)
length = len(chain_clean)
b_factors = np.zeros(length, dtype=np.float32)
outer_counter = 0
        for residue in chain_clean:
            bf_total = 0
            counter = 0
            for atom in residue:
                counter += 1
                if atom.bfactor > 0:
                    bf_total += atom.bfactor
                else:
                    # B-factor not available for this atom; mark residue as missing
                    bf_total = -1
                    break
            if bf_total == -1:
                b_factors[outer_counter] = np.nan
            else:
                b_factors[outer_counter] = bf_total / counter
            outer_counter += 1
mean = np.nanmean(b_factors)
var = np.nanvar(b_factors)
for i in range(length):
b_factors[i] = (b_factors[i] - mean)/np.sqrt(var)
return b_factors.astype(np.int64) # return rounded (integer) values
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()
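
# --- Usage sketch (editor's addition) ----------------------------------------
# Hypothetical demonstration; assumes the '7ahl.cif' example file mentioned in
# the __init__ docstring is present in the working directory.
if __name__ == '__main__':
    import os
    if os.path.exists('7ahl.cif'):
        parser = PDB_Parser('7ahl.cif')
        print(parser.get_number_of_chains())
        print(parser.get_number_of_water_molecules('A'))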
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        self.__sequences = []
        # read_fasta also writes the cleaned sequences to 'tests.txt',
        # which the statistics methods below read back in
        self.read_fasta(filepath)
def get_counts(self):
with open('tests.txt') as f:
aa_seq = f.read().splitlines()
count = len(aa_seq)
return count
def get_average_length(self):
        total = 0
        with open('tests.txt') as f:
            aa_seq = f.read().splitlines()
        count = len(aa_seq)
        for line in aa_seq:
            total = total + len(line)
        return total / count
def read_fasta(self, path):
fasta = []
test = []
head = []
with open(path) as file_one:
for line in file_one:
line = line.strip()
if not line:
continue
if line.startswith(">"):
active_sequence_name = line[1:]
head.append(active_sequence_name)
if active_sequence_name not in fasta:
test.append(''.join(fasta))
fasta = []
continue
fasta.append(line)
if fasta:
test.append(''.join(fasta))
test = test[1:]
removetable = str.maketrans('', '', '*')
out_list = [s.translate(removetable) for s in test]
file = open("tests.txt", "w")
for item in out_list:
file.write("%s\n" % item)
file.close()
return (head, out_list)
def get_abs_frequencies(self):
# return number of occurences not normalized by length
total = []
with open('tests.txt') as f:
aa_seq = f.read().splitlines()
for line in aa_seq:
for char in line:
total.append(char)
return dict((x, total.count(x)) for x in set(total))
def get_av_frequencies(self):
# return number of occurences normalized by length
total = []
with open('tests.txt') as f:
aa_seq = f.read().splitlines()
for line in aa_seq:
for char in line:
total.append(char)
a = dict((x, total.count(x)) for x in set(total))
        total_count = 0
        b = {}
        for key, value in a.items():
            total_count += value
        for key, value in a.items():
            b[key] = value / total_count
return b
<file_sep>#!/bin/bash
files_0="main"
files_1="orffinder aa_props aa_dist"
files_2="exe2_swissprot exe2_pdb"
files_3="global_alignment local_alignment"
dirs=($(find . -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
#cd templates
#find -name '*.zip' -exec sh -c 'unzip -d "${1%.*}" "$1"' _ {} \;
#Create results file
touch moss_results.txt
#Copy code files to directories. xy.py of each student now in folder xy with Matrikelnummer as name
for d in ${dirs[@]}; do
echo ${d}
cd ${d}
dirs_in_ex=($(find repos/zippedRepos -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
for d_inner in ${dirs_in_ex[@]}; do
echo ${d_inner}
ID=($(echo ${d_inner} | cut -d'-' -f8))
echo ${ID}
case $d in
0)
file_list=${files_0}
;;
1)
file_list=${files_1}
;;
2)
file_list=${files_2}
;;
3)
file_list=${files_3}
;;
esac
for f in ${file_list}; do
		if [ ! -d "collected_files/${f}" ]; then
			mkdir -p "collected_files/${f}"
fi
done
for f in ${file_list}; do
echo $f
cp repos/zippedRepos/${d_inner}/${f}.py collected_files/${f}/${ID}.py
done
done
cd ..
done
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
with open("fasta_output.txt", "r") as f:
aa_seq = f.read().splitlines()
count = len(aa_seq)
return count
def get_average_length(self):
        total = 0
        with open('fasta_output.txt') as f:
            aa_seq = f.read().splitlines()
        seq_count = len(aa_seq)
        for line in aa_seq:
            total += len(line)
        return total / seq_count
def read_fasta(self, filename):
fasta = []
output = []
head = []
with open(filename, "r") as file_one:
for line in file_one:
line = line.strip()
if not line:
continue
if line.startswith(">"):
active_sequence_name = line[1:]
head.append(active_sequence_name)
if active_sequence_name not in fasta:
output.append(''.join(fasta))
fasta = []
continue
fasta.append(line)
if fasta:
output.append(''.join(fasta))
output = output[1:]
removetable = str.maketrans('', '', '*')
out_list = [s.translate(removetable) for s in output]
file = open("fasta_output.txt","w")
for item in out_list:
file.write("%s\n" % item)
file.close()
return (head,out_list)
def get_abs_frequencies(self):
# return number of occurences not normalized by length
total = []
with open('fasta_output.txt') as f:
aa_seq = f.read().splitlines()
for line in aa_seq:
for char in line:
total.append(char)
return dict((x,total.count(x)) for x in set(total))
def get_av_frequencies(self):
# return number of occurences normalized by length
abs_freqs = self.get_abs_frequencies()
        total = 0
        av_freqs = {}
        # Iterate through the absolute frequencies and sum them all up
        for key, value in abs_freqs.items():
            total += value
        for key, value in abs_freqs.items():
            av_freqs[key] = value / total
return av_freqs
if __name__ == '__main__':
    test = AADist("tests/tests.fasta")
    print(test.get_average_length())<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
        self.sequences = []

    def add_sequence(self, sequence):
        """
        Add a sequence to the database.
        :param sequence: a protein sequence (string).
        """
        self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
        matches = []
        for seq in self.sequences:
            if word in seq:
                matches.append(seq)
        return matches
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        words = []
        words_per_seq_total = 0
        seqs_containing = {}
        for seq in self.sequences:
            current_words = []
            for i in range(len(seq) - 2):
                words.append(seq[i:i + 3])
                current_words.append(seq[i:i + 3])
            words_per_seq_total += len(set(current_words))
            for w in set(current_words):
                if w not in seqs_containing:
                    seqs_containing[w] = 1
                else:
                    seqs_containing[w] += 1
        num_words = len(set(words))
        avg_words_per_seq = words_per_seq_total / len(self.sequences)
        avg_seqs_per_word = sum(seqs_containing.values()) / num_words
        return (len(self.sequences), num_words, round(avg_words_per_seq), round(avg_seqs_per_word))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.blosum=substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        if sequence is not None:
words=[]
for i in range(len(sequence)-3+1):
words.append(sequence[i:i+3])
words=list(set(words))
result_words=[]
for w in words:
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
neww=i+j+k
score=self.blosum[AA_TO_INT[w[0]],AA_TO_INT[neww[0]]]+self.blosum[AA_TO_INT[w[1]],AA_TO_INT[neww[1]]]+self.blosum[AA_TO_INT[w[2]],AA_TO_INT[neww[2]]]
if(score>=T and neww not in result_words):
result_words.append(neww)
return result_words
else:
            result_words = []
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
for w in range(pssm.shape[0]-3+1):
neww=i+j+k
score=pssm[w,AA_TO_INT[neww[0]]]+pssm[w+1,AA_TO_INT[neww[1]]]+pssm[w+2,AA_TO_INT[neww[2]]]
if(score>=T and neww not in result_words):
result_words.append(neww)
return result_words
def get_words1(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        if sequence is not None:
            result_words = []
            result_pos = []
for ii in range(len(sequence)-3+1):
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
w=sequence[ii:ii+3]
neww=i+j+k
score=self.blosum[AA_TO_INT[w[0]],AA_TO_INT[neww[0]]]+self.blosum[AA_TO_INT[w[1]],AA_TO_INT[neww[1]]]+self.blosum[AA_TO_INT[w[2]],AA_TO_INT[neww[2]]]
if(score>=T):
result_words.append(neww)
result_pos.append(ii)
return (result_words,result_pos)
else:
            result_words = []
            result_pos = []
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
for w in range(pssm.shape[0]-3+1):
neww=i+j+k
score=pssm[w,AA_TO_INT[neww[0]]]+pssm[w+1,AA_TO_INT[neww[1]]]+pssm[w+2,AA_TO_INT[neww[2]]]
if(score>=T):
result_words.append(neww)
result_pos.append(w)
# result_words=list(set(result_words))
return (result_words,result_pos)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        if query is not None:
            words, pos = self.get_words1(sequence=query, pssm=None, T=T)
            for word_idx, word in enumerate(words):
                p = pos[word_idx]
seqs=blast_db.get_sequences(word)
for seq in seqs:
positions=[]
for i in range(len(seq)-2):
if(seq[i:i+3]==word):
positions.append(i)
for x in positions:
maxx=self.blosum[AA_TO_INT[word[0]],AA_TO_INT[query[p]]] +self.blosum[AA_TO_INT[word[1]],AA_TO_INT[query[p+1]]]+self.blosum[AA_TO_INT[word[2]],AA_TO_INT[query[p+2]]]
hsp_right_query=p+2
hsp_right_seq=x+2
hsp_left_query=p
hsp_left_seq=x
### in right
if(len(seq)>x+3 and len(query)>p+3):
extending=seq[x+3:]
s=maxx
q_counter=p+3
s_counter=0
while(True):
change=self.blosum[AA_TO_INT[extending[s_counter]],AA_TO_INT[query[q_counter]]]
s+=change
if(s>maxx):
maxx=s
hsp_right_query=q_counter
hsp_right_seq=x+3+s_counter
s_counter+=1
q_counter+=1
if(maxx-s>=X or len(extending)==s_counter or len(query)==q_counter):
break
### in left
if(x>0 and p>0):
extending=seq[0:x]
s=maxx
q_counter=p-1
s_counter=-1
while(True):
change=self.blosum[AA_TO_INT[extending[s_counter]],AA_TO_INT[query[q_counter]]]
s+=change
if(s>maxx):
maxx=s
hsp_left_query=q_counter
hsp_left_seq=x+s_counter
s_counter-=1
q_counter-=1
if(maxx-s>=X or -1*s_counter>len(extending) or q_counter<0):
break
if(maxx>=S):
tup=(hsp_left_query,hsp_left_seq,hsp_right_seq-hsp_left_seq+1,maxx)
if(seq not in d):
d[seq]=[tup]
else:
if(tup not in d[seq]):
d[seq].append(tup)
#################################################################3
else:
words,pos=self.get_words1(sequence=None,pssm=pssm,T=T)
            for word_idx, word in enumerate(words):
                p = pos[word_idx]
seqs=blast_db.get_sequences(word)
for seq in seqs:
positions=[]
for i in range(len(seq)-2):
if(seq[i:i+3]==word):
positions.append(i)
                    for x in positions:
maxx=pssm[p,AA_TO_INT[word[0]]] +pssm[p+1,AA_TO_INT[word[1]]]+pssm[p+2,AA_TO_INT[word[2]]]
hsp_right_query=p+2
hsp_right_seq=x+2
hsp_left_query=p
hsp_left_seq=x
### in right
if(len(seq)>x+3 and pssm.shape[0]>p+3):
extending=seq[x+3:]
s=maxx
q_counter=p+3
s_counter=0
while(True):
change=pssm[q_counter,AA_TO_INT[extending[s_counter]]]
s+=change
if(s>maxx):
maxx=s
hsp_right_query=q_counter
hsp_right_seq=x+3+s_counter
s_counter+=1
q_counter+=1
if(maxx-s>=X or len(extending)==s_counter or pssm.shape[0]==q_counter):
break
### in left
if(x>0 and p>0):
extending=seq[0:x]
s=maxx
q_counter=p-1
s_counter=-1
while(True):
change=pssm[q_counter,AA_TO_INT[extending[s_counter]]]
s+=change
if(s>maxx):
maxx=s
hsp_left_query=q_counter
hsp_left_seq=x+s_counter
s_counter-=1
q_counter-=1
if(maxx-s>=X or -1*s_counter>len(extending) or q_counter<0):
break
if(maxx>=S):
tup=(hsp_left_query,hsp_left_seq,hsp_right_seq-hsp_left_seq+1,maxx)
if(seq not in d):
d[seq]=[tup]
else:
if(tup not in d[seq]):
d[seq].append(tup)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
<file_sep>##############
# Exercise 2.5
##############
# You can use tiny_dna.txt for your own testing. Good luck!
code_dict={
'TTT':'F',
'TTC':'F',
'TTA':'L',
'TTG':'L',
'CTT':'L',
'CTC':'L',
'CTA':'L',
'CTG':'L',
'ATT':'I',
'ATC':'I',
'ATA':'I',
'ATG':'M',
'GTT':'V',
'GTC':'V',
'GTA':'V',
'GTG':'V',
'TCT':'S',
'TCC':'S',
'TCA':'S',
'TCG':'S',
'CCT':'P',
'CCC':'P',
'CCA':'P',
'CCG':'P',
'ACT':'T',
'ACC':'T',
'ACA':'T',
'ACG':'T',
'GCT':'A',
'GCC':'A',
'GCA':'A',
'GCG':'A',
'TAT':'Y',
'TAC':'Y',
'TAA':'_',
'TAG':'_',
'CAT':'H',
'CAC':'H',
'CAA':'Q',
'CAG':'Q',
'AAT':'N',
'AAC':'N',
'AAA':'K',
'AAG':'K',
'GAT':'D',
'GAC':'D',
'GAA':'E',
'GAG':'E',
'TGT':'C',
'TGC':'C',
'TGA':'_',
'TGG':'W',
'CGT':'R',
'CGC':'R',
'CGA':'R',
'CGG':'R',
'AGT':'S',
'AGC':'S',
'AGA':'R',
'AGG':'R',
'GGT':'G',
'GGC':'G',
'GGA':'G',
'GGG':'G'
}
def toAminoAcid(orf):
aminoAcids = []
for i in range(0, len(orf), 3):
aminoAcids.append(code_dict[orf[i:i+3]])
return ''.join(aminoAcids)
def reverseComplement(orf):
newOrf=""
for i in range(0,len(orf)):
if(orf[i]=='G'):
newOrf=newOrf+'C'
elif(orf[i]=='T'):
newOrf=newOrf+'A'
elif(orf[i]=='A'):
newOrf=newOrf+'T'
elif(orf[i]=='C'):
newOrf=newOrf+'G'
newOrf=newOrf[::-1]
return newOrf
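
# Quick sanity check (editor's addition): the complement of "ATGC" is "TACG",
# which reversed gives "GCAT".
assert reverseComplement("ATGC") == "GCAT"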
def searcher(orf, reverseFlag, begList, endList, proteinList, flagList):
begIndex = -1
if reverseFlag == False:
for t in range(0,3):
for i in range(0, len(orf)-t -3, 3):
curr = code_dict[orf[i+t : i+t+3]]
if curr == 'M' and begIndex == -1:
begIndex = i + t
if curr == '_' and begIndex != -1:
endIndex = i + t
if ((endIndex - begIndex) / 3) + 1 > 34:
protein = toAminoAcid(orf[begIndex:endIndex])
if protein not in proteinList and protein != '':
proteinList.append(protein)
begList.append(begIndex % (len(orf)//2))
flagList.append(reverseFlag)
endList.append(endIndex % (len(orf)//2)+2)
begIndex = -1
begIndex = -1
else:
for t in range(0,3):
for i in range(0, len(orf)-t -3, 3):
curr = code_dict[orf[i+t : i+t+3]]
if curr == 'M' and begIndex == -1:
begIndex = len(orf) - 1 - i - t
if curr == '_' and begIndex != -1:
endIndex = len(orf) - 1 - i - t
if ((begIndex - endIndex) / 3) + 1 > 34:
protein = toAminoAcid(orf[len(orf)-1 - begIndex:len(orf) - 1 - endIndex])
if protein not in proteinList and protein != '':
proteinList.append(protein)
begList.append(begIndex % ((len(orf))//2))
flagList.append(reverseFlag)
endList.append(endIndex % ((len(orf))//2)-2)
begIndex = -1
begIndex = -1
return begList, endList, proteinList, flagList
def get_orfs(orf):
    # Normalize case first; the original uppercased only after validation,
    # so lowercase input would have been rejected
    orf = orf.upper()
    DNA = ['A', 'T', 'C', 'G']
    for i in orf:
        if i not in DNA:
            raise TypeError("incorrect input")
    if ('A' not in orf) or ('G' not in orf) or ('C' not in orf) or ('T' not in orf):
        raise TypeError("incorrect input")
    # Duplicate the sequence so ORFs crossing the circular origin are found
    orf = orf + orf
begList=list()
endList=list()
proteinList=list()
flagList= list()
begList, endList, proteinList, flagList = searcher(orf, False, begList, endList, proteinList, flagList)
begList, endList, proteinList, flagList = searcher(reverseComplement(orf), True, begList, endList, proteinList, flagList)
tuples = list(zip(begList, endList, proteinList, flagList))
results = []
for i in tuples:
flag = True
for j in tuples:
#If the end index is equal, get the longest protein
if i[1] == j[1] and len(i[2]) < len(j[2]):
flag = False
break
if flag:
results.append(i)
return results<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)


def isPositivelyCharged(aa):
    return aa in "RHK"


def isNegativelyCharged(aa):
    return aa in "DE"


def isHydrophobic(aa):
    return aa in "AVILMFYW"


def isAromatic(aa):
    return aa in "HFWY"


def isPolar(aa):
    return aa in "RNDQEHKSTY"


def isProline(aa):
    return aa == "P"


def containsSulfur(aa):
    return aa in "CM"


def isAcid(aa):
    return aa in "DE"


def isBasic(aa):
    return aa in "RKH"
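
# --- Usage sketch (editor's addition) ----------------------------------------
# Classify a few residues with the predicates above; the one-letter codes are
# the standard amino acid alphabet.
if __name__ == '__main__':
    for aa in "RDP":
        print(aa, isCharged(aa), isPolar(aa), isProline(aa))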
import re
from pathlib import Path
from itertools import product
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.w = 3
self.blast_db = []
# print(self.possible_words)
pass
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
        # Check that the sequence contains only valid amino acid letters
        if not bool(re.match('^[' + ALPHABET + ALPHABET.lower() + ']+$', sequence)):
            raise TypeError("Invalid Amino Acid Sequence")
        self.blast_db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
sequences = []
if len(word) != self.w:
raise TypeError("Invalid word length")
for seq in self.blast_db:
if word.lower() in seq.lower():
sequences.append(seq)
return sequences
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
no_of_sequences = 0
no_of_different_words = 0
avg_words_per_sequence = 0
avg_sequence_per_word = 0
# Number of sequences in database
no_of_sequences = len(self.blast_db)
different_words_in_sequence = []
different_words_in_db = []
sum_words_per_seq = 0
for seq in self.blast_db:
words_per_seq = 0
words = []
            # Collect every overlapping word of length w; the original three
            # offset loops could miss words near the end of the sequence
            for i in range(len(seq) - self.w + 1):
                words.append(seq[i:i + self.w])
words = list(set(words))
words_per_seq = len(words)
sum_words_per_seq += words_per_seq
different_words_in_sequence.append(words)
different_words_in_db.extend(words)
different_words_in_db = list(set(different_words_in_db))
# Number of different words in database
no_of_different_words = len(different_words_in_db)
# Average number of words per sequence (rounded to nearest int)
avg_words_per_sequence = sum_words_per_seq / len(self.blast_db)
# Average number of sequences per word (rounded to nearest int)
sum_sequence_per_word = 0
for word in different_words_in_db:
sequence_per_word = 0
for seq in self.blast_db:
if word in seq:
sequence_per_word += 1
sum_sequence_per_word += sequence_per_word
avg_sequence_per_word = sum_sequence_per_word / no_of_different_words
# round to nearest integer
avg_words_per_sequence = round(avg_words_per_sequence)
avg_sequence_per_word = round(avg_sequence_per_word)
return (no_of_sequences, no_of_different_words, avg_words_per_sequence, avg_sequence_per_word)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sm = substitution_matrix
        self.possible_words = []
        # Use product rather than permutations: permutations would drop words
        # with repeated letters such as 'AAA'
        for letters in product(ALPHABET, repeat=3):
            self.possible_words.append(''.join(letters))
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        words = []
        if sequence is not None:
            for i in range(len(sequence) - 2):
                s = sequence[i:i + 3]
                for w in self.possible_words:
                    score = (self.sm[AA_TO_INT[s[0]], AA_TO_INT[w[0]]]
                             + self.sm[AA_TO_INT[s[1]], AA_TO_INT[w[1]]]
                             + self.sm[AA_TO_INT[s[2]], AA_TO_INT[w[2]]])
                    if score >= T:
                        words.append(w)
            words = list(set(words))
        # The PSSM variant was not implemented in this submission
        return words
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # template placeholder; search not implemented
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # template placeholder; search not implemented
        return d
test = BlastDb()
# ACDEFGHIKLMNPQRSTVWY
test.add_sequence("ACDACD")
test.add_sequence("EFGACD")
test.add_sequence("HIK")
sub = [
[ 4.0, 0.0, -2.0, -1.0, -2.0, 0.0, -2.0, -1.0, -1.0, -1.0, -1.0, -2.0, -1.0, -1.0, -1.0, 1.0, 0.0, 0.0, -3.0, -2.0 ],
[ 0.0, 9.0, -3.0, -4.0, -2.0, -3.0, -3.0, -1.0, -3.0, -1.0, -1.0, -3.0, -3.0, -3.0, -3.0, -1.0, -1.0, -1.0, -2.0, -2.0 ],
[ -2.0, -3.0, 6.0, 2.0, -3.0, -1.0, -1.0, -3.0, -1.0, -4.0, -3.0, 1.0, -1.0, 0.0, -2.0, 0.0, -1.0, -3.0, -4.0, -3.0 ],
[ -1.0, -4.0, 2.0, 5.0, -3.0, -2.0, 0.0, -3.0, 1.0, -3.0, -2.0, 0.0, -1.0, 2.0, 0.0, 0.0, -1.0, -2.0, -3.0, -2.0 ],
[ -2.0, -2.0, -3.0, -3.0, 6.0, -3.0, -1.0, 0.0, -3.0, 0.0, 0.0, -3.0, -4.0, -3.0, -3.0, -2.0, -2.0, -1.0, 1.0, 3.0 ],
[ 0.0, -3.0, -1.0, -2.0, -3.0, 6.0, -2.0, -4.0, -2.0, -4.0, -3.0, 0.0, -2.0, -2.0, -2.0, 0.0, -2.0, -3.0, -2.0, -3.0 ],
[ -2.0, -3.0, -1.0, 0.0, -1.0, -2.0, 8.0, -3.0, -1.0, -3.0, -2.0, 1.0, -2.0, 0.0, 0.0, -1.0, -2.0, -3.0, -2.0, 2.0 ],
[ -1.0, -1.0, -3.0, -3.0, 0.0, -4.0, -3.0, 4.0, -3.0, 2.0, 1.0, -3.0, -3.0, -3.0, -3.0, -2.0, -1.0, 3.0, -3.0, -1.0 ],
[ -1.0, -3.0, -1.0, 1.0, -3.0, -2.0, -1.0, -3.0, 5.0, -2.0, -1.0, 0.0, -1.0, 1.0, 2.0, 0.0, -1.0, -2.0, -3.0, -2.0 ],
[ -1.0, -1.0, -4.0, -3.0, 0.0, -4.0, -3.0, 2.0, -2.0, 4.0, 2.0, -3.0, -3.0, -2.0, -2.0, -2.0, -1.0, 1.0, -2.0, -1.0 ],
[ -1.0, -1.0, -3.0, -2.0, 0.0, -3.0, -2.0, 1.0, -1.0, 2.0, 5.0, -2.0, -2.0, 0.0, -1.0, -1.0, -1.0, 1.0, -1.0, -1.0 ],
[ -2.0, -3.0, 1.0, 0.0, -3.0, 0.0, 1.0, -3.0, 0.0, -3.0, -2.0, 6.0, -2.0, 0.0, 0.0, 1.0, 0.0, -3.0, -4.0, -2.0 ],
[ -1.0, -3.0, -1.0, -1.0, -4.0, -2.0, -2.0, -3.0, -1.0, -3.0, -2.0, -2.0, 7.0, -1.0, -2.0, -1.0, -1.0, -2.0, -4.0, -3.0 ],
[ -1.0, -3.0, 0.0, 2.0, -3.0, -2.0, 0.0, -3.0, 1.0, -2.0, 0.0, 0.0, -1.0, 5.0, 1.0, 0.0, -1.0, -2.0, -2.0, -1.0 ],
[ -1.0, -3.0, -2.0, 0.0, -3.0, -2.0, 0.0, -3.0, 2.0, -2.0, -1.0, 0.0, -2.0, 1.0, 5.0, -1.0, -1.0, -3.0, -3.0, -2.0 ],
[ 1.0, -1.0, 0.0, 0.0, -2.0, 0.0, -1.0, -2.0, 0.0, -2.0, -1.0, 1.0, -1.0, 0.0, -1.0, 4.0, 1.0, -2.0, -3.0, -2.0 ],
[ 0.0, -1.0, -1.0, -1.0, -2.0, -2.0, -2.0, -1.0, -1.0, -1.0, -1.0, 0.0, -1.0, -1.0, -1.0, 1.0, 5.0, 0.0, -2.0, -2.0 ],
[ 0.0, -1.0, -3.0, -2.0, -1.0, -3.0, -3.0, 3.0, -2.0, 1.0, 1.0, -3.0, -2.0, -2.0, -3.0, -2.0, 0.0, 4.0, -3.0, -1.0 ],
[ -3.0, -2.0, -4.0, -3.0, 1.0, -2.0, -2.0, -3.0, -3.0, -2.0, -1.0, -4.0, -4.0, -2.0, -3.0, -3.0, -2.0, -3.0, 11.0, 2.0 ],
[ -2.0, -2.0, -3.0, -2.0, 3.0, -3.0, 2.0, -1.0, -2.0, -1.0, -1.0, -2.0, -3.0, -1.0, -2.0, -2.0, -2.0, -1.0, 2.0, 7.0 ]
]
if __name__ == '__main__':
    check = Blast(np.array(sub, dtype=np.int64))
    print(check.get_words(sequence="MVATGLFVGLNKGHVVTKREQPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK", T=13))
<file_sep>##############
# Exercise 2.6
##############
from ex13 import read_fasta
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.sequences = []
self.read_fasta(filepath)
self.counter = Counter()
self.total_length = 0.0
def get_counts(self):
return len(self.sequences)
    def get_average_length(self):
        # Recompute from scratch so repeated calls do not accumulate
        self.total_length = 0.0
        for el in self.sequences:
            self.total_length += len(el)
        avg_length = self.total_length / self.get_counts()
        return avg_length
def read_fasta(self, path):
self.sequences = read_fasta(path)
def get_abs_frequencies(self):
self.counter = Counter()
for el in self.sequences:
self.counter += Counter(el)
return self.counter
    def get_av_frequencies(self):
        self.get_abs_frequencies()
        # Normalize by the total residue count; do not rely on
        # get_average_length() having been called first
        total = sum(self.counter.values())
        for aa in self.counter:
            self.counter[aa] = self.counter[aa] / total
        return self.counter
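
# --- Usage sketch (editor's addition) ----------------------------------------
# Hypothetical call; assumes 'tests/tests.fasta' exists (as referenced by the
# other exercise files) and that ex13.read_fasta returns a list of sequences.
if __name__ == '__main__':
    import os
    if os.path.exists('tests/tests.fasta'):
        dist = AADist('tests/tests.fasta')
        print(dist.get_counts(), dist.get_average_length())
        print(dist.get_av_frequencies())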
<file_sep>import numpy as np
from numpy import inf
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
self.size = len(sequences)
if self.size == 0:
raise TypeError
self.length = len(sequences[0])
for seq in sequences:
if len(seq) != self.length:
raise TypeError
for r in seq:
if r not in ALPHABET:
raise TypeError
self.calculate()
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
bg_freq = None
if bg_matrix is not None:
bg_matrix = np.array(bg_matrix)
bg_freq = bg_matrix.sum(1)
# Calculate Sequence Weights
# DONE!
# Count (with weights) observed amino acids and gaps [!]
f_matrix = np.zeros((self.length, 21), np.float64)
for sequence_index in range(0, self.size):
seq = self.sequences[sequence_index]
seq_weight = self.weights[sequence_index]
for column_index in range(0, self.length):
residue = seq[column_index]
if use_sequence_weights:
f_matrix[column_index][AA_TO_INT[residue]] += seq_weight
else:
f_matrix[column_index][AA_TO_INT[residue]] += 1
# Redistribute gaps according to background frequencies [!]
if redistribute_gaps:
for column_index in range(0, self.length):
gaps = f_matrix[column_index][GAP_INDEX]
for aa in range(0, 20):
if bg_freq is not None:
f_matrix[column_index][aa] += gaps * bg_freq[aa]
else:
f_matrix[column_index][aa] += gaps * 0.05
# pssm from count
pssm = np.zeros((self.length, 20), np.float64)
for column_index in range(0, self.length):
for aa in range(0, 20):
pssm[column_index][aa] = f_matrix[column_index][aa]
# Add weighted pseudo-counts [!]
if add_pseudocounts:
p_counts = np.zeros_like(pssm)
for column_index in range(0, self.length):
for aa in range(0, 20):
value = 0
for aa_2 in range(0, 20):
if bg_matrix is not None:
value += pssm[column_index][aa_2] * bg_matrix[aa][aa_2] / bg_freq[aa_2]
else:
value += pssm[column_index][aa_2] * 0.0025 / 0.05
p_counts[column_index][aa] = value
for column_index in range(0, self.length):
for aa in range(0, 20):
alpha = self.num_observations - 1
pseudo_count = p_counts[column_index][aa]
frequency = pssm[column_index][aa]
pssm[column_index][aa] = (alpha * frequency + beta * pseudo_count) / (alpha + beta)
# Normalize to relative frequencies
row_sums = pssm.sum(1)
for index in range(0, self.length):
pssm[index] = pssm[index] / row_sums[index]
# Divide by background frequencies
if bg_matrix is None:
pssm /= 0.05
else:
for column_index in range(0, self.length):
for aa in range(0, 20):
pssm[column_index][aa] /= bg_freq[aa]
# Calculate log scores
pssm = 2 * np.log2(pssm)
pssm[pssm == -inf] = -20
# Remove rows
primary_sequence = self.get_primary_sequence()
without_rows = np.zeros((len(primary_sequence), 20), np.float64)
index = 0
        for column_index in range(0, self.length):
if self.sequences[0][column_index] == '-':
continue
without_rows[index] = pssm[column_index]
index += 1
return np.rint(without_rows).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return self.size, self.length
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return ''.join([r if r != '-' else '' for r in self.sequences[0]])
def calculate(self):
r_values = np.zeros(self.length)
weights = np.zeros((self.length, self.size), np.float64)
# calculate r values
column_residue_map = dict()
for index in range(0, self.length):
column_residue_map[index] = dict()
for residue in ALPHABET:
column_residue_map[index][residue] = 0
for seq in self.sequences:
column_index = 0
for residue in seq:
column_residue_map[column_index][residue] = column_residue_map[column_index][residue] + 1
column_index += 1
for index in range(0, self.length):
for count in column_residue_map[index].values():
r_values[index] += 1 if count > 0 else 0
for i in range(0, self.length):
for j in range(0, self.size):
if r_values[i] > 1:
residue = self.sequences[j][i]
weights[i][j] = 1 / (r_values[i] * column_residue_map[i][residue])
# Weights
weights = weights.sum(0)
self.weights = weights
# Num-Observations
self.num_observations = 0
for r_value in r_values:
self.num_observations += r_value
self.num_observations /= self.length
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.num_observations
msa = [
"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLK------------VAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----------MGEIAVGLNKGHQVTKKA----------GTPRPSRRKGFLSQRVKKVRAVVREVAGWAPYERRVMELLKVGKD---KRALKMCKRKLGTHMRGKKKREEMAGVLRKMQAASKGE---------",
"----MAPKQPNTGLFVGLNKGHIVTKKE----------LAPRPSDRKGKTSKRTHFVRNLIREVAGFAPYEKRITELLKVGKD---KRALKVRQEKVGHSQESKEEER--GDVQCSP--------PDEGWWWY",
"---------MAPGLVVGLNKGKVLTKRQ----------LPERPSRRKGQLSKRTSFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MGVQYKLAVGLGKGHKVTKNE----------YKPRPSRRKGALSKHTRFVRDLIREVCGFAPFERRAMELLKVSKD---KRALKFIKKRLGTHLRGKRKRDELSNVLVAQRKAAAHKEKTEHK---",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------GKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"---------MAPGLVVGLNKGKTLTKRQ----------LPERPSRRKGHLSKRTAFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MAIRYPMAVGLNKGHKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCAFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGYKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MVVRYPMAVGLNKGHKVTKNV----------SKPKHSRRRGRLTKHAKFARDLIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNTLAAMRKAAAKKE--------",
"-------MAIRYPMAVGLKKGHPVTKNV----------TKPKHSRRGGRLTKHSKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNILAAMRKAAAKKE--------",
"---MAKEAPAKTGLAVGLNKGHKTTARV----------VKPRVSRTKGHLSKRTAFVREVVKEVAGLAPYERRVIELLRNSKD---KRARKLAKKRLGTFGRAKRKVDELQRVIAESRRAH------------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRSRLTNHTKFVRDMIREVCGFAPYERRAMELLKVSKS---KRALKFIKKRVGTHIRAKRKREELSNVLAAMEEAAAKKD--------",
"-----MSGPGIEGLAVGLNKGHAATQLP----------VKQRQNRHKGVASKKTKIVRELVREITGFAPYERRVLEMLRISKD---KRALKFLKRRIGTHRRAKGKREELQNVIIAQRKAHK-----------",
"--------MAKSGIAAGVNKGRKTTAKE----------VAPKISYRKGASSQRTVFVRSIVKEVAGLAPYERRLIELIRNAGE---KRAKKLAKKRLGTHKRALRKVEEMTQVIAESRRH-------------",
"-------MAVRYELAIGLNKGHKTSKIRNVKYTGDKKVKGLRGSRLKNIQTRHTKFMRDLVREVVGHAPYEKRTMELLKVSKD---KRALKFLKRRLGTHIRAKRKREELSNILTQLRKAQTHAK--------",
"-------MAVKTGIAIGLNKGKKVTQMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"-------MTVKTGIAIGLNKGKKVTSMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"---------MAKGQAVGINKGFITTQLE-------KKLQKHSAVQRKGKLGKRVALVRQVIREVTGFAPYEKRIIELIKAGSAKDSKKATKIARKRLGTHRRAKVKKALLEEAVRAQRKK-------------",
"MSSAATKPVKRSGIIKGFNKGHAVAKRT------------VTSTFKKQVVTKRVAAIRDVIREISGFSPYERRVSELLKSGLD---KRALKVAKKRLGSIQAGKKKRDDIANINRKASAK-------------",
"MKNA--------------------YKKVRVRYPVKRPDVKRKQRGPRAETQESRFLAAAVADEISGLSPLEKKAISLLEAKNN---NKAQKLLRKRLGSHKRAVAKVEKLARMLLEK----------------"
]
m = MSA(["AA-A", "A-DD", "AEDC"])
print("")
print("RESULT:")
print("")
print(m.get_pssm())<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
    'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio import SeqIO # Tip: This module might be useful for parsing...
############ Exercise 3: SwissProt ##########
class SwissProt_Parser:
PARSER = SeqIO
    def __init__(self, path, frmt='uniprot-xml'):
        # 'rU' mode is deprecated; universal newlines are the default in Python 3
        with open(path, "r") as handle:
            for self.record in SeqIO.parse(handle, frmt):
                self.record_id = self.record.id
#self.sp_anno = None # Parse the XML file once and re-use it in the functions below
# 3.2 SwissProt Identifiers
def get_sp_identifier( self ):
identifier = self.record_id
return identifier
# 3.3 SwissProt Sequence length
def get_sp_sequence_length( self ):
seq_len = len(self.record.seq)
return seq_len
# 3.4 Organism
def get_organism( self ):
organism = self.record.annotations['organism']
return organism
# 3.5 Localizations
def get_localization( self ):
        localization = self.record.annotations['comment_subcellularlocation_location']
return localization
# 3.6 Cross-references to PDB
def get_pdb_support( self ):
pdb_ids = []
for i in self.record.dbxrefs:
if "PDB:" in i :
index = i.split(':',1)[1]
pdb_ids.append(index)
return pdb_ids
def main():
print('SwissProt XML Parser class')
return None
if __name__ == '__main__':
main()
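
# --- Usage sketch (editor's addition) ----------------------------------------
# Hypothetical example; 'P99999.xml' stands in for a locally downloaded
# SwissProt XML entry and is not shipped with the exercise.
if __name__ == '__main__':
    import os
    if os.path.exists('P99999.xml'):
        sp = SwissProt_Parser('P99999.xml')
        print(sp.get_sp_identifier(), sp.get_sp_sequence_length())
        print(sp.get_organism(), sp.get_pdb_support())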
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError()
self.seq_length = len(sequences[0])
for seq in sequences:
if len(seq) != self.seq_length:
raise TypeError()
for c in seq:
if not c in ALPHABET:
raise TypeError()
self.sequences = sequences
# calculate sequence weights
self.weights = np.zeros(len(self.sequences))
r = np.zeros(self.seq_length)
for i in range(self.seq_length):
characters = []
for seq in sequences:
if seq[i] not in characters:
characters.append(seq[i])
r[i] = len(characters)
for k,seq in enumerate(self.sequences):
for i in range(self.seq_length):
s = 0
for other_seq in self.sequences:
if other_seq[i] == seq[i]:
s += 1
if r[i] != 1:
self.weights[k] += 1.0 / (r[i] * s)
# calculate number of independent observations
self.num_obs = np.sum(r) / float(self.seq_length)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((self.seq_length, 20))
# calculate amino acid background frequencies
bg_frequencies = np.zeros(20)
        if bg_matrix is None:  # '== None' is unreliable for numpy arrays
bg_frequencies.fill(0.05)
else:
for aa_idx in range(20):
bg_frequencies[aa_idx] = np.sum(bg_matrix[aa_idx])
# count (with weights) observed amino acids and gaps
gaps = np.zeros(self.seq_length)
for i in range(self.seq_length):
for k,seq in enumerate(self.sequences):
add = 1.0
if use_sequence_weights:
add = self.weights[k]
if AA_TO_INT[seq[i]] == GAP_INDEX:
gaps[i] += add
else:
pssm[i][AA_TO_INT[seq[i]]] += add
# redistribute gaps according to background frequencies
if redistribute_gaps:
pssm += np.outer(gaps, bg_frequencies)
# add weighted pseudocounts
if add_pseudocounts:
pseudocounts = np.zeros((self.seq_length, 20))
for i in range(self.seq_length):
for aa_idx in range(20):
for inner_aa_idx in range(20):
subst = 0.0025
                        if bg_matrix is not None:
subst = bg_matrix[aa_idx][inner_aa_idx]
pseudocounts[i][aa_idx] += (pssm[i][inner_aa_idx] / bg_frequencies[inner_aa_idx]) * subst
N = self.num_obs
pssm[i] = ((N-1) * pssm[i] + beta * pseudocounts[i]) / ((N-1)+beta)
# normalize to relative frequencies
row_sum = 1.0 / np.sum(pssm, axis=1)
pssm = (pssm.T * row_sum).T
# divide by background frequencies
pssm /= bg_frequencies
# calculate log-score
pssm = 2 * np.log2(pssm)
# remove rows corresponding to gaps in the primary sequence
mask = np.ones(self.seq_length, dtype=bool)
for i in range(self.seq_length):
if AA_TO_INT[self.sequences[0][i]] == GAP_INDEX:
mask[i] = False
pssm = pssm[mask]
# replace -inf by -20
pssm[~np.isfinite(pssm)] = -20
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), self.seq_length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-','')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.num_obs.astype(np.float64)
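
# --- Usage sketch (editor's addition) ----------------------------------------
# Toy alignment to illustrate the API; values are illustrative, not reference
# output. The PSSM entries are 2*log2(observed/background), with -20 where an
# amino acid is never observed in a column.
if __name__ == '__main__':
    toy = MSA(["SE-QE", "SE-CE", "SEEQE"])
    print(toy.get_size())              # (3, 5): three sequences, five columns
    print(toy.get_primary_sequence())  # 'SEQE' (gap removed)
    print(toy.get_sequence_weights())
    print(toy.get_pssm())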
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
self.aa = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        n = self.get_counts()
        lengths = [len(s) for s in self.__sequences]
        return sum(lengths) / n
def read_fasta(self, path):
with open(path,"r") as f:
seqs = []
seq =""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
seqs.append(seq)
sequence_started = False
seq = ""
else:
sequence_started = True
                    line = line.strip()
                    if line.endswith("*"):
                        seq = seq + line[:-1]
                    else:
                        seq = seq + line
seqs.append(seq)
self.__sequences = seqs
return seqs
    def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
        for a in self.aa.keys():
            self.aa[a] = sum(s.count(a) for s in self.__sequences)
        return self.aa

    def get_av_frequencies(self):
        # return number of occurrences normalized by length
        avg_f = self.get_abs_frequencies()  # ensure counts are filled in first
        total_length = self.get_average_length() * self.get_counts()
        for a in avg_f.keys():
            avg_f[a] = avg_f[a] / total_length
        return avg_f
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        # Store the parsed {name: sequence} dict; the original code later
        # tried to open 'self' as a file, which cannot work
        self.__sequences = self.read_fasta(filepath)

    def get_counts(self):
        return len(self.__sequences)

    def get_average_length(self):
        total = sum(len(seq) for seq in self.__sequences.values())
        return float(total / self.get_counts())

    def read_fasta(self, path):
        begin = True
        prots = {}
        seq = ""
        pname = None
        fil = open(path, "rt")
        lins = fil.readlines()
        fil.close()
        for lin in lins:
            slin = lin.strip()
            if not slin:
                continue
            if slin[0] == '>':
                if begin == False:
                    prots[pname] = seq
                seq = ""
                pname = slin[1:].strip()
                begin = False
            else:
                seq = seq + slin
        if pname is not None:
            prots[pname] = seq
        return prots

    def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
        freqs = {}
        for seq in self.__sequences.values():
            for aa in seq:
                freqs[aa] = freqs.get(aa, 0) + 1
        return freqs

    def get_av_frequencies(self):
        # return number of occurrences normalized by length
        freqs = self.get_abs_frequencies()
        total = sum(freqs.values())
        return {aa: count / total for aa, count in freqs.items()}
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
    def get_counts(self):
        # 'count' is a module-level global populated by read_fasta
        return count

    def get_average_length(self):
        average_length = length / count
        return average_length
def read_fasta(self, filename):
f = open(filename, "r")
first_line = f.readline()
header = list()
        header.append(first_line[1:].strip())
global count
count = 1
aa_seq = ""
aa_list = list()
global length
length = 0
global abs_freq
abs_freq = {}
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
length += len(aa_seq)
a = dict((i,aa_seq.count(i)) for i in set(aa_seq))
abs_freq = { i: a.get(i, 0) + abs_freq.get(i, 0) for i in set(a) | set(abs_freq) }
aa_list.append(aa_seq)
aa_seq = ""
sequence_started = False
header.append(line[1:].strip())
count += 1
else:
continue
else:
sequence_started = True
aa_seq += line.strip()
if aa_seq.endswith('*'):
aa_seq = aa_seq[:-1]
length += len(aa_seq)
a = dict((i,aa_seq.count(i)) for i in set(aa_seq))
abs_freq = { i: a.get(i, 0) + abs_freq.get(i, 0) for i in set(a) | set(abs_freq) }
aa_list.append(aa_seq)
result = list(zip(header,aa_list))
return result
def get_abs_frequencies(self):
# return number of occurences not normalized by length
return (abs_freq)
def get_av_frequencies(self):
# return number of occurences normalized by length
total=0
for i in abs_freq:
total+=abs_freq[i]
for i in abs_freq:
abs_freq[i] = (abs_freq[i]/total)
return abs_freq
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.seq_list = sequences
self.unnormalized_seq_list = []
        ################# Validate the input sequences #################
        if len(sequences) == 0:
            raise TypeError('Invalid MSA: empty sequence list')
        if any((c not in ALPHABET) for seq in sequences for c in seq):
            raise TypeError('Invalid MSA: invalid character in sequence')
        length = len(sequences[0])
        for seq in sequences:
            if len(seq) != length:
                raise TypeError('Invalid MSA: sequences differ in length')
self.gapped_seq_length = len(self.seq_list[0])
pssm_gap = np.zeros([self.gapped_seq_length, len(ALPHABET)])
for idx in range(self.gapped_seq_length):
for seq in self.seq_list:
pssm_gap[idx][AA_TO_INT[seq[idx]]] += 1
self.pssm_freq_gapped = pssm_gap
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
        if bg_matrix is None:
sum_weight_matrix = self.get_sequence_weights()
pssm = []
# weighted sequence basic pssm
if use_sequence_weights:
pssm = self.get_weighted_pssm(sum_weight_matrix)
elif not use_sequence_weights and redistribute_gaps:
pssm = self.get_redistributed_gaps_pssm()
elif not redistribute_gaps and not use_sequence_weights and add_pseudocounts:
pssm = self.get_pssm_with_pseudocounts(beta)
# unweighted basic pssm
else:
pssm = self.get_basic_pssm()
pssm /= np.sum(pssm, axis=1, keepdims = True)
pssm /= 0.05
np.seterr(divide='ignore')
pssm = np.log2(pssm) * 2
pssm[pssm == float("-inf")] = -20
return np.rint(pssm).astype(np.int64)
else:
bg_array = self.get_background_frequency(bg_matrix)
if redistribute_gaps and not add_pseudocounts:
pssm = self.get_redistributed_gaps_pssm_bg(bg_array)
elif redistribute_gaps and add_pseudocounts and not use_sequence_weights:
pssm = self.get_pssm_with_pseudocounts_redistributed_bg(bg_matrix,beta)
elif not redistribute_gaps and use_sequence_weights and not add_pseudocounts:
sum_weight_matrix = self.get_sequence_weights()
pssm = self.get_weighted_pssm(sum_weight_matrix)
elif not redistribute_gaps and use_sequence_weights and add_pseudocounts:
sum_weight_matrix = self.get_sequence_weights()
pssm = self.get_pssm_with_pseudocounts_weights_bg(sum_weight_matrix, beta, bg_matrix)
elif redistribute_gaps and use_sequence_weights and add_pseudocounts:
sum_weight_matrix = self.get_sequence_weights()
pssm = self.get_pssm_with_pseudocounts_weights_redistributed_bg(bg_matrix, beta, sum_weight_matrix)
else:
pssm = self.get_basic_pssm()
pssm /= np.sum(pssm, axis=1, keepdims = True)
for i in range(len(pssm)):
for j in range(len(pssm[0])):
pssm[i][j] /= bg_array[j]
np.seterr(divide='ignore')
pssm = np.log2(pssm) * 2
pssm[pssm == float("-inf")] = -20
return np.rint(pssm).astype(np.int64)
def get_basic_pssm(self):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
# create sequence list according to primary sequence
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs and seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += 1
return pssm
def get_weighted_pssm(self, weights):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
# create sequence list according to primary sequence
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs and seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += weights[seq_idx]
return pssm
def get_redistributed_gaps_pssm(self):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
gaps = np.zeros([primary_seq_length])
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
# create sequence list according to primary sequence
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs:
if seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += 1
else:
gaps[non_empty_idxs.index(idx)] += 1
for i in range(len(pssm)):
if(gaps[i] != 0):
for j in range(len(pssm[i])):
pssm[i][j] += 0.05 * gaps[i]
return pssm
def get_redistributed_gaps_pssm_bg(self, bg_array):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
gaps = np.zeros([primary_seq_length])
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
# create sequence list according to primary sequence
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs:
if seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += 1
else:
gaps[non_empty_idxs.index(idx)] += 1
for i in range(len(pssm)):
if(gaps[i] != 0):
for j in range(len(pssm[i])):
pssm[i][j] += bg_array[j] * gaps[i]
return pssm
def get_pssm_with_pseudocounts(self, beta):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
# create sequence list according to primary sequence
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs and seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += 1
# pseudocounts
L = len(self.get_primary_sequence())
g = np.zeros((L, 20))
for i in range(L):
for a in range(20):
for j in range(20):
g[i][a] += (pssm[i][j] * 0.05)
# alpha beta change
alpha = self.get_number_of_observations() - 1
for i in range(L):
for j in range(20):
pssm[i][j] = ((alpha * pssm[i][j]) + (beta * g[i][j])) / (alpha + beta)
return pssm
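    # For reference, the pseudocount computation above follows
    #     g[i][a] = sum_j f[i][j] * (q[j][a] / P[j])
    #     F[i]    = (alpha * f[i] + beta * g[i]) / (alpha + beta)
    # with alpha = N_obs - 1; under uniform background frequencies
    # q[j][a] = 0.0025 and P[j] = 0.05, so q[j][a] / P[j] = 0.05.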
def get_pssm_with_pseudocounts_redistributed_bg(self, bg_matrix, beta):
bg_array = self.get_background_frequency(bg_matrix)
# creates pssm with sequence weights and redistributed gaps
seq_width = len(self.get_primary_sequence())
pssm = np.zeros([seq_width, len(ALPHABET)-1])
gaps = np.zeros([seq_width])
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
sum_weight_matrix = self.get_sequence_weights()
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs:
if seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += 1
else:
gaps[non_empty_idxs.index(idx)] += 1
for i in range(len(pssm)):
if(gaps[i] != 0):
for j in range(len(pssm[i])):
pssm[i][j] += bg_array[j]*gaps[i]
        # pseudocounts
L = len(self.get_primary_sequence())
g = np.zeros((L, 20))
q = bg_matrix
P = self.get_background_frequency(bg_matrix)
for i in range(L):
for a in range(20):
for j in range(20):
g[i][a] += ((pssm[i][j] * q[a][j]) / P[j])
# alpha beta change
alpha = self.get_number_of_observations() - 1
for i in range(L):
for j in range(20):
pssm[i][j] = ((alpha * pssm[i][j]) + (beta * g[i][j])) / (alpha + beta)
return pssm
def get_pssm_with_pseudocounts_weights_bg(self, weights, beta, bg_matrix):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
# create sequence list according to primary sequence
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs and seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += weights[seq_idx]
# pseudocounts
L = len(self.get_primary_sequence())
g = np.zeros((L, 20))
q = bg_matrix
P = self.get_background_frequency(bg_matrix)
for i in range(L):
for a in range(20):
for j in range(20):
g[i][a] += ((pssm[i][j] * q[a][j]) / P[j])
# alpha beta change
alpha = self.get_number_of_observations() - 1
for i in range(L):
for j in range(20):
pssm[i][j] = ((alpha * pssm[i][j]) + (beta * g[i][j])) / (alpha + beta)
return pssm
def get_pssm_with_pseudocounts_weights_redistributed_bg(self, bg_matrix, beta, weights):
primary_seq_length = len(self.get_primary_sequence())
        primary_seq_with_gaps = self.seq_list[0]
bg_array = self.get_background_frequency(bg_matrix)
# creates pssm with sequence weights and redistributed gaps
seq_width = len(self.get_primary_sequence())
pssm = np.zeros([seq_width, len(ALPHABET)-1])
gaps = np.zeros([seq_width])
non_empty_idxs = []
for i in range(len(primary_seq_with_gaps)):
if(primary_seq_with_gaps[i] != '-'):
non_empty_idxs.append(i)
pssm = np.zeros([primary_seq_length, len(ALPHABET)-1])
sum_weight_matrix = self.get_sequence_weights()
for seq_idx, seq in enumerate(self.seq_list):
for idx in range(len(self.seq_list[0])):
if idx in non_empty_idxs:
if seq[idx] != '-':
pssm[non_empty_idxs.index(idx)][AA_TO_INT[seq[idx]]] += weights[seq_idx]
else:
gaps[non_empty_idxs.index(idx)] += weights[seq_idx]
for i in range(len(pssm)):
if(gaps[i] != 0):
for j in range(len(pssm[i])):
pssm[i][j] += bg_array[j]*gaps[i]
        # pseudocounts
L = len(self.get_primary_sequence())
g = np.zeros((L, 20))
q = bg_matrix
P = self.get_background_frequency(bg_matrix)
for i in range(L):
for a in range(20):
for j in range(20):
g[i][a] += ((pssm[i][j] * q[a][j]) / P[j])
# alpha beta change
alpha = self.get_number_of_observations() - 1
for i in range(L):
for j in range(20):
pssm[i][j] = ((alpha * pssm[i][j]) + (beta * g[i][j])) / (alpha + beta)
return pssm
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.seq_list), len(self.seq_list[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
ungapped_primary_seq = self.seq_list[0].replace("-","")
return ungapped_primary_seq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
seq_width = len(self.seq_list[0])
num_seq = len(self.seq_list)
weights_matrix = np.zeros([seq_width, num_seq])
pssm = np.zeros([seq_width, len(ALPHABET)])
for idx in range(seq_width):
for seq in self.seq_list:
pssm[idx][AA_TO_INT[seq[idx]]] += 1
for idx in range(seq_width):
for seq_idx, seq in enumerate(self.seq_list):
if(np.count_nonzero(pssm[idx]) > 1):
weights_matrix[idx,seq_idx] = 1/(pssm[idx][AA_TO_INT[seq[idx]]] * np.count_nonzero(pssm[idx]))
else:
weights_matrix[idx,seq_idx] = 0
sum_weight_matrix = np.sum(weights_matrix, axis=0)
        weights = sum_weight_matrix
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
L = len(self.seq_list[0])
num_obs = 0
for i in range(L):
num_obs += np.count_nonzero(self.pssm_freq_gapped[i])
num_obs /= L
return np.float64(num_obs)
def get_background_frequency(self, bg_matrix):
backgroundFreq = np.zeros(20)
for i in range(len(bg_matrix)):
backgroundFreq[i] = sum(bg_matrix[i])
return backgroundFreq
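# Worked toy example (illustration only, reusing the small MSA that appears
# in tests elsewhere in this repo): in the first column every sequence has
# 'S', so its relative frequency is 1.0 and the PSSM score becomes
# 2 * log2(1.0 / 0.05) = 8.64..., which rounds to 9; absent amino acids get -20.
#
#   msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
#   msa.get_pssm()[0][AA_TO_INT['S']]   # -> 9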
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignments = list()
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
        traceback = np.full((len(self.string2) + 1, len(self.string1) + 1, 3), fill_value=" ", dtype=str)
traceback[0][0] = 's'
for i in range(1, len(self.string2) + 1):
self.score_matrix[i][0] = self.score_matrix[i - 1][0] + self.gap_penalty
traceback[i][0][0] = 'u'
for j in range(1, len(self.string1) + 1):
self.score_matrix[0][j] = self.score_matrix[0][j - 1] + self.gap_penalty
traceback[0][j][2] = 'l'
for j in range(1, len(self.string1) + 1):
for i in range(1, len(self.string2) + 1):
cell1 = self.score_matrix[i - 1][j] + self.gap_penalty
cell2 = self.score_matrix[i][j - 1] + self.gap_penalty
cell3 = self.score_matrix[i - 1][j - 1] + self.substitution_matrix[self.string1[j - 1]][self.string2[i - 1]]
res = max(cell1, cell2, cell3)
self.score_matrix[i][j] = res
if res == cell1:
traceback[i][j][0] = 'u'
if res == cell3:
traceback[i][j][1] = 'd'
if res == cell2:
traceback[i][j][2] = 'l'
repeated = False
while not repeated:
alignment = list()
i = len(self.string2)
j = len(self.string1)
while traceback[i][j][0] != 's':
if traceback[i][j][0] == 'u':
alignment.append(self.string2[i - 1])
if traceback[i][j][1] != " " or traceback[i][j][2] != " ":
traceback[i][j][0] = " "
i -= 1
elif traceback[i][j][1] == 'd':
alignment.append(self.string2[i - 1])
if traceback[i][j][0] != " " or traceback[i][j][2] != " ":
traceback[i][j][1] = " "
i -= 1
j -= 1
elif traceback[i][j][2] == 'l':
alignment.append('-')
if traceback[i][j][1] != " " or traceback[i][j][0] != " ":
traceback[i][j][2] = " "
j -= 1
alignment.reverse()
if (self.string1, ''.join(alignment)) in self.alignments:
repeated = True
break
else:
self.alignments.append((self.string1, ''.join(alignment)))
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
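if __name__ == '__main__':
    # Minimal usage sketch with a hypothetical 2-letter substitution matrix;
    # the exercise itself supplies full 20x20 matrices (e.g. BLOSUM).
    identity = {'A': {'A': 1, 'C': -1}, 'C': {'A': -1, 'C': 1}}
    ga = GlobalAlignment('ACA', 'ACA', -1, identity)
    print(ga.get_best_score())   # 3: three matches, no gaps
    print(ga.get_alignments())   # [('ACA', 'ACA')]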
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
file_path = ""
def __init__(self, filepath):
self.__sequences = []
self.file_path = filepath
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
count = self.get_counts()
sum = 0
for line in self.__sequences:
sum = sum + len(line)
return (sum / count)
def read_fasta(self, path):
dataIntoArray = []
test = []
head = []
fasta = []
with open(path) as file_one:
for line in file_one:
line = line.strip()
if not line:
continue
if line.startswith(">"):
active_sequence_name = line[1:]
head.append(active_sequence_name)
if active_sequence_name not in fasta:
test.append(''.join(fasta))
fasta = []
continue
fasta.append(line)
if fasta:
test.append(''.join(fasta))
test = test[1:]
removetable = str.maketrans('', '', '*')
out_list = [s.translate(removetable) for s in test]
self.__sequences = out_list
def get_abs_frequencies(self):
# return number of occurences not normalized by length
frequency = []
for line in self.__sequences:
for char in line:
frequency.append(char)
return dict((x, frequency.count(x)) for x in set(frequency))
def get_av_frequencies(self):
# return number of occurences normalized by length
frequency = []
for line in self.__sequences:
for char in line:
frequency.append(char)
diction = dict((x, frequency.count(x)) for x in set(frequency))
sum = 0
av_frequency = {}
for key, value in diction.items():
sum = sum + value
print(sum)
for key, value in diction.items():
value = value / sum
av_frequency.setdefault(key)
av_frequency[key] = value
return av_frequency
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.total_word_count = 0
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
        result = [seq for seq in self.sequences if word in seq]
        return result
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corrsponding to the mentioned
statistics (in order of listing above).
"""
        self.total_word_count = 0  # reset so repeated calls do not accumulate
        for seq in self.sequences:
used_words = [seq[i:i+3] for i in range(0, len(seq)-2)]
used_words = np.unique(used_words)
self.total_word_count += len(used_words)
words = [seq[i:i+3] for seq in self.sequences for i in range(0, len(seq)-2)]
words = np.unique(words)
total_seq_count = sum(map(lambda y: len(list(filter(lambda x: y in x, self.sequences))), words))
seq_count = len(self.sequences)
avg_word_count = int(round(self.total_word_count / seq_count))
avg_seq_count = int(round(total_seq_count / len(words)))
return (seq_count, len(words), avg_word_count, avg_seq_count)
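    # Worked toy example (hypothetical 2-sequence database): for
    # ["MKVA", "MKVC"] the per-sequence unique 3-mers are {MKV, KVA} and
    # {MKV, KVC}, giving stats (2 sequences, 3 distinct words,
    # round(4/2) = 2 words per sequence, round(4/3) = 1 sequences per word).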
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
#all possible words
self.all_words = [aa1 + aa2 + aa3 for aa1 in ALPHABET for aa2 in ALPHABET for aa3 in ALPHABET]
def get_score(self, word1, word2):
a = self.substitution_matrix[AA_TO_INT[word1[0]], AA_TO_INT[word2[0]]]
b = self.substitution_matrix[AA_TO_INT[word1[1]], AA_TO_INT[word2[1]]]
c = self.substitution_matrix[AA_TO_INT[word1[2]], AA_TO_INT[word2[2]]]
return a + b + c
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
result = []
if sequence is not None:
for i in range(0,len(sequence)-2):
w = sequence[i:i+3]
for word in self.all_words:
if self.get_score(w, word) >= T:
if word not in result:
result.append(word)
if pssm is not None:
for word in self.all_words:
for row in range(0, np.shape(pssm)[0]-3):
if pssm[row, AA_TO_INT[word[0]]] + pssm[row+1, AA_TO_INT[word[1]]] + pssm[row+2, AA_TO_INT[word[2]]] >= T:
if word not in result:
result.append(word)
return result
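    # Illustration: for every 3-mer w of the query (or every 3-row window of
    # the PSSM), all words from self.all_words whose summed per-position
    # substitution score reaches T are kept; in particular, a query word is
    # returned itself whenever its self-score is >= T.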
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplictes).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query is not None:
words = self.get_words(sequence=query, T=T)
else:
words = self.get_words(pssm=pssm, T=T)
for w in words:
targets = blast_db.get_sequences(w)
for target in targets:
                target_start = target.index(w)
        # placeholder result from the exercise template; HSP extension is not implemented here
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplictes).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        # placeholder result from the exercise template; the two-hit search is not implemented here
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
dbsequences = [
"MVQRLTYRRRLSYNTASNKTRLSRTPGNRIVYLYTKKVGKAPKSACGVLPGRLRGVVAVRPKVLMRLSKTKKHVQQGLWWLHVRQVCPDRIKRAFLIEEQKIVVKVLKAQAQSQKAK",
"MSGRLWCKAIFAGYKRGLRNQREHTALLKIEGVYARDETEFYLGKRCAYVYKAKNNTVTPGGKPNKTRVIWGKVTRAHGNSGMVRAKFRSNLPAKAIGHRIRVMLYPSRI",
"MACARPLISVYSEKGESSGKNVTLPAVFKAPIRPDIVNFVHTNLRKNNRQPYAVSELAGHQTSAESWGTGRAVARIPRVRGGGTH<KEY>",
"MP<KEY>KVKAFLADPSAFAAAAPVAAATTAAPAAAAAPAKVEAKEESEESDEDMGFGLFD",
"MYSEWRSLHLVIQNDQGHTSVLHSYPESVGREVANAVVR<KEY>LLYTDKDVKWTMEVICYGLTLPLDGET<KEY>GSSQIRLCLQVLRAIQKLARESSIMARETWEVLLLFLLQINDILLAPPTVQGGIAENLAEKLIGVLFEVWLLACTRCFPTPPYWKTAKEMVANWRHHPAVVEQWSKVICALTSRLLRFTYGPSFPPFKVPDEDANLIPPEMDNECIAQTWFRFLHMLSNPV<KEY>",
"<KEY>",
"<KEY>",
"<KEY>SVYAHFPINVVIQENGSLVEIRNFLGEKYIRRVRMRTGVACSVSQAQKDELILEGNDIELVSNSAALIQQATTVKNKDIRKFLDGIYVSEKGTVQQPDE",
"MPGWRLLAQGGAQVLGGGAGGLGAAPGLGSRKNILFVVRNLHSKSSTWWDEHLSEENVSFVKQLVSDENKAQLTSLLNPLKDEPWPLHPWEPGSSRVGLIALKLGMMPLWTKDGQKHAVTLLQVQDCHVLKYTPKEDHNGKTATLTVGGKTGSRLYKANSILEFYRDLGLPPKQTTKIFHVTDNAVIKQGTPLYAAHFRPGQYVDVTAKTIGKGFQGVMKRWGF<KEY>TKTHRRPGAISTGDIARVWPGTKMPGKMGNQNRTVYGLKVWRVNTKHNIIYVNGSVGHRNCLVKIKDSTLPAYKDLGKSLPFPTYFPDGDEEELPEDLYDESVRQPSDPSITFA",
"MVFRRFVEVGRVAYISFGPHAGKLVAIVDVIDQNRALVDGPCTRVRRQAMPFKCMQLTDFILKFPHSARQKYVRKAWEKADINTKWAATRWAKKIDARERKAKMTDFDRFKVMKAKKMRNRIIKTEVKKLQRAALLKASPKKAAVAKAAIAAAAAAKAKVPAKKATGPGQKAAAQKASAQKAAGQKAAPPAKGQKGQKTPAQKAPAPKAAGKKA",
"MAALRPLVKPKIVKKRTKKFIRHQSDRYVKIKRNWRKPRGIDNRVRRRFKGQILMPNIGYGSNKKTKHMLPSGFRKFLVHNVKELEVLLMCNKSYCAEIAHNVSSKNRKAIVERAAQLAIRVTNPNARLRSEENE",
"MDDEEETYRLWKIRKTIMQLCHDRGYLVTQDELDQTLEEFKAQFGDKPSEGRPRRTDLTVLVAHNDDPTDQMFVFFPEEPKVGIKTIKVYCQRMQEENITRALIVVQQGMTPSAKQSLVDMAPKYVLEQFLQQELLINITEHELVPEHVVMTKEEVTELLARYKLRESQLPRIQAGDPVARYFGIKRGQVVKIIRPSETAGRYITYRLVQ",
"<KEY>",
"<KEY>",
"<KEY>",
"MTDTVVNRWMYPGDGPLQSNDKEQLQAGWSVHPGAQTDRQRKQEELTDEEKEIINRVIARAEKMETMEQER<KEY>STTTRDSEGWDHGHGGGAGDTSRSPGG<KEY>",
"MSAHLQWMVVRNCSSFLIKRNKQTYSTEPNNLKARNSFRYNGLIHRKTVGVEAWPDGKGVVVVMKRRSGQRKPATSYVRTTINKNARATLSSIRHMIRKNKYRPDLRMAAIRRASAILRSQKPVVVKRKRTRPTKSS",
"MTSTSKAVELQLQVKHNAEELQDFMRDLEHWEKTMRQKDLELRRQSGVPEENLPPIRNGSFRKKKKRKTKDSSKKTKEENTKNRIKSFDYDAWAKLDVDSILDELDKEDSTHDSVSQESESDEDGVRVDSQKALVLKEKGNKYFKQGKYDEAIECYTKGMDADPYNPVLPTNRASAYFRLKKFAVAESDCNLAIALSRSYTKAYARRGA<KEY>GGG",
"MAPVKKLVAKGGKKKKQVLKFTLDCTHPVEDGIMDAANFEQFLQERIKVNGKAGNLGGGVVTIERSKSKITVTSEEPFSKRYLKYLTKKYLKKNNLRDWLRVVANSKESYELRYFQINQDEEEEEDED",
"MLASKHTPWRRLQGISFGMYSAEELKKLSVKSITNPRYVDSLGNPSADGLYDLALGPADSKEVCSTCVQDFNNCSGHLGHIDLPLTVYNPLLFDKLYLLLRGSCLNCHMLTCPRAAIHLLVCQLKVLDVGALQAVYELERILSRFLEETSDPSAFEIQEELEEYTSKILQNNLLGSQGAHVKNVCESRSKLVAHFWKTHMAAKRCPHCKTGRSVVRKEHNSKLTITYPAMVHKKSGQKDAELPEGAPAAPGIDEAQMGKRGYLTPSSAQEHLFAIWKNEGFFLNYLFSGLDDIGPESSFNPSMFFLDFIVVPPSRYRPINRLGDQMFTNGQTVNLQAVMKDAVLIRKLLAVMAQEQK<KEY>",
"<KEY>",
"MRIEKCYFCSGPIYPGHGMMFVRNDCKVFRFCKSKCHKNFKKKRNPRKVRWTKAFRKAAGKELTVDNSFEFEKRRNEPVKYQRELWNKTIDAMKRVEEIKQRRQAKFIMNRLKKNKELQKVQDIREVKQNIHLIRAPLAGKGKQLEEKMVQQLQEDVDMEDAS",
"MTAAVKWAVSHRTIWRHLFPIQNGAISSACHKSTYSSLPDDYNCKVELALTSDGRTIVCYHPSVDVPYEHTKPIPHPDLLHNNEETHEQILRTKLEGNHKHLEQGPMIEQLSKVFFTTKHRWYPHGQYHRCRKKLNPPKDR",
"MGLEKSLFLFSLLVLVLGWVQPSLGGESRESSADKFKRQHMDTEGPSKSSPTYCNQMMKRQGMTKGSCKPVNTFVHEPLEDVQAICSQGQVTCKNGRNNCHKSSSTLRITDCRLKGSSKYPNCDYTTTDSQKHIIIACDGNPYVPVHFDASV",
"MASDAASLLVLQLVLQPTLVTGITIQTAIKNFRILHVDYPMVNYPKGFHGYCNGLMAYVRGKLQDWYCPKIHYVVHAPFESIQKFCKYSESFCEDYNEYCTLTQNSFPITVCTLDHKQAPTSCSYNSTLTNQRLYLLCSRKHDAEPIGVIGLY",
"MWALRSLLRPLGLRTMSQGSARRPRPPKDPLRHLRTREKRGPGWGPGGPNTVYLQVVAAGGRDAAAALYVFSEYNRYLFNCGEGVQRLMQEHKLKVARLDNIFLTRMHWSNVGGLCGMILTLKETGLPKCVLSGPPQLEKYLEAIKIFSGPLKGIDLAVRPHSAPEYKDETMTVYQVPIHSERRCGEQEPSRSPKRSPNRLSPKQSSSDPGSAENGQCLPEGSSAGVNGKAWGRDPSLVVAFVCKLHLRKGNFLVLKAKELGLPVGTAAIAPIIAAVKDGKSITYEGREIAAEELCTPPDPGLVFIVVECPDEGFIQPICENDTFQRYQGEADAPVAVVVHIAPESVLIDSRYQQWMERFGPDTQHLILNENCPSVHNLR<KEY>",
"<KEY>INTRLEKTAKELEEEITFRKNVESTLRQLER<KEY>",
"MALCALASALRSLSLASPAITARVPTLLPVGQSNVLLQLPSALALPAHRPVHMSAD<KEY>",
"MAALFVRSVVASVVDLSRLAVKPRAFSILLGTLPSAKPCAEVRSLLCGGPVLSLQPSLGFKTKGVIKKRCRDCYMVKRRGRWFVLCKTNPKHKQRQM"
]
db = BlastDb()
for seq in dbsequences:
db.add_sequence(seq)
db.get_sequences('DLP')
print(db.get_db_stats())<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
codon_dict = {
'TCA' : 'S', # Serine
'TCC' : 'S', # Serine
'TCG' : 'S', # Serine
'TCT' : 'S', # Serine
'TTC' : 'F', # Phenylalanine
'TTT' : 'F', # Phenylalanine
'TTA' : 'L', # Leucine
'TTG' : 'L', # Leucine
'TAC' : 'Y', # Tyrosine
'TAT' : 'Y', # Tyrosine
'TAA' : '_', # Stop
'TAG' : '_', # Stop
'TGC' : 'C', # Cysteine
'TGT' : 'C', # Cysteine
'TGA' : '_', # Stop
'TGG' : 'W', # Tryptophan
'CTA' : 'L', # Leucine
'CTC' : 'L', # Leucine
'CTG' : 'L', # Leucine
'CTT' : 'L', # Leucine
'CCA' : 'P', # Proline
'CCC' : 'P', # Proline
'CCG' : 'P', # Proline
'CCT' : 'P', # Proline
'CAC' : 'H', # Histidine
'CAT' : 'H', # Histidine
'CAA' : 'Q', # Glutamine
'CAG' : 'Q', # Glutamine
'CGA' : 'R', # Arginine
'CGC' : 'R', # Arginine
'CGG' : 'R', # Arginine
'CGT' : 'R', # Arginine
'ATA' : 'I', # Isoleucine
'ATC' : 'I', # Isoleucine
'ATT' : 'I', # Isoleucine
'ATG' : 'M', # Methionine
'ACA' : 'T', # Threonine
'ACC' : 'T', # Threonine
'ACG' : 'T', # Threonine
'ACT' : 'T', # Threonine
'AAC' : 'N', # Asparagine
'AAT' : 'N', # Asparagine
'AAA' : 'K', # Lysine
'AAG' : 'K', # Lysine
'AGC' : 'S', # Serine
'AGT' : 'S', # Serine
'AGA' : 'R', # Arginine
'AGG' : 'R', # Arginine
'GTA' : 'V', # Valine
'GTC' : 'V', # Valine
'GTG' : 'V', # Valine
'GTT' : 'V', # Valine
'GCA' : 'A', # Alanine
'GCC' : 'A', # Alanine
'GCG' : 'A', # Alanine
'GCT' : 'A', # Alanine
'GAC' : 'D', # Aspartic Acid
'GAT' : 'D', # Aspartic Acid
'GAA' : 'E', # Glutamic Acid
'GAG' : 'E', # Glutamic Acid
'GGA' : 'G', # Glycine
'GGC' : 'G', # Glycine
'GGG' : 'G', # Glycine
'GGT' : 'G', # Glycine
}
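# Quick sanity checks for the codon table above (standard genetic code):
# 'ATG' is the start codon (methionine), 'TAA' is a stop codon.
assert codon_dict['ATG'] == 'M'
assert codon_dict['TAA'] == '_'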
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
list_of_orfs = []
def get_orfs(genome):
    # reject sequences containing characters other than A, C, G, T
    if not set(genome) <= set('ACGT'):
        raise TypeError('Sequence does not look like DNA.')
    list_of_orfs.clear()  # reset results from previous calls
    for i in range(0, 3):
        getOrfFromStartingPosition(genome, i)
        genome = getComplementaryGenome(genome[::-1])
        getOrfFromStartingPositionComplement(genome, i)
        genome = getComplementaryGenome(genome[::-1])
    return list_of_orfs
def getComplementaryGenome(genomee):
    # complement each base; expects the already-reversed genome string
    switch = {
        'A': 'T',
        'T': 'A',
        'G': 'C',
        'C': 'G',
    }
    complGenome = ""
    for i in range(0, len(genomee)):
        complGenome += switch.get(genomee[i])
    return complGenome
def getOrfFromStartingPosition(genome, startingIndex):
indexFirst = 0
numOfStarts = 0
collectedCodons = ""
for i in range(startingIndex, len(genome) - 2, 3):
codon = codon_dict[genome[i:i+3]]
if codon == '_':
numOfStarts = 0
indexSecond = i + 2
if len(collectedCodons) >= 34:
theTuple = tuple((indexFirst, indexSecond, collectedCodons, False))
list_of_orfs.append(theTuple)
collectedCodons = ""
if numOfStarts == 1:
collectedCodons = collectedCodons + codon
if codon == "M" and numOfStarts == 0:
indexFirst = i
numOfStarts = 1
collectedCodons = codon
def getOrfFromStartingPositionComplement(genome, startingIndex):
indexFirst = 0
numOfStarts = 0
collectedCodons = ""
for i in range(startingIndex, len(genome) - 2, 3):
codon = codon_dict[genome[i:i + 3]]
if codon == '_':
numOfStarts = 0
indexSecond = i + 2
if len(collectedCodons) >= 34:
indexFirst = len(genome) - indexFirst - 1
indexSecond = len(genome) - indexSecond - 1
theTuple = tuple((indexFirst, indexSecond, collectedCodons, True))
list_of_orfs.append(theTuple)
collectedCodons = ""
if numOfStarts == 1:
collectedCodons = collectedCodons + codon
if codon == "M" and numOfStarts == 0:
indexFirst = i
numOfStarts = 1
collectedCodons = codon
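if __name__ == '__main__':
    # Usage sketch with a hypothetical mini-genome: a start codon, 40 alanine
    # codons and a stop codon yield a single ORF of length >= 34 residues.
    toy_genome = 'ATG' + 'GCT' * 40 + 'TAA'
    print(get_orfs(toy_genome))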
<file_sep>import numpy as np
import math
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
    def __init__(self, sequences):
        """
        Initialize the MSA class with the provided list of sequences. Check the
        sequences for correctness. Pre-calculate any statistics you seem fit.
        :param sequences: List containing the MSA sequences.
        """
        self.sequences = sequences
        if len(sequences) == 0:
            raise TypeError('Invalid MSA')
        seqLen = len(sequences[0])
        for i in range(0, len(sequences)):
            if seqLen != len(sequences[i]):
                raise TypeError('Invalid MSA')
            for x in range(0, len(sequences[i])):
                if sequences[i][x] not in ALPHABET:
                    raise TypeError('Invalid MSA: ' + sequences[i][x] + ' not in alphabet')
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# Sequence weights: self.weights
numberOfSequences = len(self.sequences)
lengthOfSequences = len(self.sequences[0])
pssm = np.zeros((len(self.get_primary_sequence()), 20), np.int64)
        pssmWithGaps = np.zeros((lengthOfSequences, 21), np.float64)
# Count Residues
if use_sequence_weights == False:
for x in range(0, numberOfSequences):
for y in range(0, lengthOfSequences):
positionInPssm = ALPHABET.find(self.sequences[x][y])
# print(x,y,self.sequences[x][y], positionInPssm, lengthOfSequences, len(self.get_primary_sequence()))
pssmWithGaps[y][positionInPssm] += 1
# Incoperate weights
else:
for x in range(0, numberOfSequences):
for y in range(0, lengthOfSequences):
positionInPssm = ALPHABET.find(self.sequences[x][y])
# print(x,y,self.sequences[x][y], positionInPssm, lengthOfSequences, len(self.get_primary_sequence()))
pssmWithGaps[y][positionInPssm] += self.weights[x]
#print('count', pssmWithGaps)
# recalculate bg_matrix to bg_vector
bg_vector = []
        if bg_matrix is not None:
bg_vector = np.sum(bg_matrix, axis=0)
# Redistributing Gaps
        if redistribute_gaps:
for x in range(0, lengthOfSequences):
if bg_matrix is None:
amountToAdd = pssmWithGaps[x][20] * 0.05
for y in range(0, 20):
pssmWithGaps[x][y] += amountToAdd
else:
for y in range(0, 20):
f1aa = AA_TO_INT[self.sequences[0][x]]
f2aa = y
if f1aa != GAP_INDEX:
amountToAdd = pssmWithGaps[x][20] * bg_vector[f2aa]
pssmWithGaps[x][y] += amountToAdd
#print('with gaps', pssmWithGaps)
# Add weightes pseudocounts
        if add_pseudocounts:
            pseudocountMatrix = np.zeros((lengthOfSequences, 20), np.float64)
# Use default Background frequency
            if bg_matrix is None:
for x in range(0, lengthOfSequences):
for y in range(0, 20):
matrixCount = 0
for y2 in range(0, 20):
matrixCount += (pssmWithGaps[x][y2] / 0.05) * 0.0025
pseudocountMatrix[x][y] = matrixCount
# Use Matrix
else:
for x in range(0, lengthOfSequences):
f2aa = AA_TO_INT[self.sequences[0][x]]
if f2aa == GAP_INDEX:
continue
for y in range(0, 20):
matrixCount = 0
for y2 in range(0, 20):
matrixCount += (pssmWithGaps[x][y2] / bg_vector[y2]) * bg_matrix[y][y2]
pseudocountMatrix[x][y] = matrixCount
# Put into matrix
alpha = self.get_number_of_observations() - 1
for x in range(0, lengthOfSequences):
for y in range(0, 20):
f2aa = AA_TO_INT[self.sequences[0][x]]
if f2aa != GAP_INDEX:
pssmWithGaps[x][y] = (alpha * pssmWithGaps[x][y] + beta * pseudocountMatrix[x][y]) / (alpha + beta)
# Normalize relative frequencies (divide by row sum)
# Dont count gaps
sumOfCol = np.sum(a=pssmWithGaps[:,:20], axis=1)
#print(sumOfCol)
for x in range(0, lengthOfSequences):
for y in range(0, 20):
if pssmWithGaps[x][y] != 0.0:
pssmWithGaps[x][y] = (pssmWithGaps[x][y] / sumOfCol[x])
# print('normalize', pssmWithGaps)
# Divide by background frequency
if bg_matrix is None:
for x in range(0, lengthOfSequences):
for y in range(0, 20):
if pssmWithGaps[x][y] != 0.0:
pssmWithGaps[x][y] /= 0.05
else:
for x in range(0, lengthOfSequences):
for y in range(0, 20):
if pssmWithGaps[x][y] != 0.0:
f1aa = AA_TO_INT[self.sequences[0][x]]
f2aa = y
if f1aa != GAP_INDEX:
pssmWithGaps[x][y] /= bg_vector[f2aa]
# print('frequency', pssmWithGaps)
# Transform with log
for x in range(0, lengthOfSequences):
for y in range(0, 21):
if pssmWithGaps[x][y] == 0.0:
pssmWithGaps[x][y] = -20
else:
pssmWithGaps[x][y] = 2 * math.log(pssmWithGaps[x][y], 2)
#print('logtransform', pssmWithGaps)
# Round to nearest int
for x in range(0, lengthOfSequences):
for y in range(0, 21):
pssmWithGaps[x][y] = int(round(pssmWithGaps[x][y]))
# Remove rows corresponding to gaps in primary sequence
countX = 0
for x in range(0, lengthOfSequences):
if self.sequences[0][x] == '-':
continue
for y in range(0, 20):
pssm[countX][y] = pssmWithGaps[x][y]
countX += 1
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_amount_of_different_amino(self, x, y):
uniqueCounter = 0
uniqueString = ''
for i in range(0, len(self.sequences)):
if self.sequences[i][y] not in uniqueString:
uniqueCounter += 1
uniqueString += self.sequences[i][y]
return uniqueCounter
def get_single_weight(self, x, y):
sumOfSame = 0
toFind = self.sequences[x][y]
for i in range(0, len(self.sequences)):
if self.sequences[i][y] == toFind:
sumOfSame += 1
return 1 / (self.get_amount_of_different_amino(x, y) * sumOfSame)
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
self.wikMatrix = np.zeros(self.get_size(), np.float64)
numberOfSequences = len(self.sequences)
lengthOfSequences = len(self.sequences[0])
for x in range(0, numberOfSequences):
for y in range(0, lengthOfSequences):
self.wikMatrix[x][y] = self.get_single_weight(x, y)
weightSize = numberOfSequences
weights = np.zeros(weightSize)
for x in range(0, weightSize):
sum = 0
for y in range(0, lengthOfSequences):
if self.get_amount_of_different_amino(x, y) > 1:
sum += self.wikMatrix[x][y]
weights[x] = sum
self.weights = weights.astype(np.float64)
return weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
sum = 0
msaColumns = len(self.sequences[0])
for i in range(0, msaColumns):
sum += self.get_amount_of_different_amino(0, i)
num_obs = np.float64((1 / msaColumns) * sum)
return num_obs
msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
msa.get_pssm(add_pseudocounts=True)<file_sep>AUTHOR: <NAME>
2019
0. For each exercise, save the names of the students who participated in a .csv file.
Save as 'students_<#ex>.csv' (export names in Artemis). Run downloadCode.sh <username> <password>
after adjusting the exercises variable in that script.
2. Adjust the following vars in test.sh:
<files_x>
3. Run test.sh
5. Run checkCode.sh
Now the moss results can be found in moss_results.txt and a list of the links in
moss_links.txt
TESTED:
-Renamed single variable
-Reordered declarations of vars
-Reordered functions and Code-parts
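Example run (hypothetical username/password; adjust to your own Artemis account):
./downloadCode.sh jdoe mypassword
./test.sh
./checkCode.sh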
<file_sep>#!/bin/bash
# This file checks the code for plagiarism
dirs=(1)
#create results directory
if [ ! -d '../results' ]; then
mkdir '../results'
fi
#perform moss search and save results in moss_results.txt file
for d in ${dirs[@]}; do
if [ -d "../results/ex${d}" ]; then
    truncate -s 0 ../results/ex${d}/moss_results.txt
fi
if [ ! -d "../results/ex${d}" ]; then
mkdir "../results/ex${d}"
touch ../results/ex${d}/moss_results.txt
touch ../results/ex${d}/moss_links.txt
fi
echo ${d}
cd repos/${d}
codefiledirs=($(find collected_files -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
cd ../..
echo ${d} >> ../results/ex${d}/moss_results.txt
echo '#########' >> ../results/ex${d}/moss_results.txt
for f in ${codefiledirs[@]}; do
echo ${f} >> ../results/ex${d}/moss_results.txt
cd ..
templatefile="additional_info/templates/${d}/${f}.py"
if [ -f $templatefile ]; then
echo "$templatefile serves as basefile"
./moss -l python -b ${templatefile} codechecker/repos/${d}/collected_files/${f}/*.py >> results/ex${d}/moss_results.txt
else
./moss -l python codechecker/repos/${d}/collected_files/${f}/*.py >> results/ex${d}/moss_results.txt
fi
cd codechecker
echo ' ' >> ../results/ex${d}/moss_results.txt
done
echo ' ' >> ../results/ex${d}/moss_results.txt
#Aggregate links in moss_links.txt
truncate -s 0 ../results/ex${d}/moss_links.txt
grep "http://" ../results/ex${d}/moss_results.txt >> ../results/ex${d}/moss_links.txt
#Generate the Mossum-Graphs in '../results' directory
links_list=""
while IFS= read -r line # concatenate all links as string
do
echo $line
links_list="$links_list $line"
echo $links_list
done < "../results/ex${d}/moss_links.txt"
cd ../mossum/mossum
python mossum.py -p 70 -t ".*/(.+).py" -m -o ../../results/ex${d}/accumulated_result $links_list
cd ../../codechecker
#Generate mossum graph for one whole excercise (including all single files)
IFS=' ' read -r -a array <<< "$links_list"
cd repos/${d}
codefiledirs=($(find collected_files -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
cd ../..
i=0
for f in ${codefiledirs[@]}; do
cd ../mossum/mossum
echo ${array[$i]}
python mossum.py -p 70 -t ".*/(.+).py" -o ../../results/ex${d}/$f ${array[$i]}
    i=$((i+1))
cd ../../codechecker
done
done
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
import sys
sys.path.insert(0, "/home/shalvi/.local/lib/python3.7/site-packages/Bio")
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.PDB.Polypeptide import is_aa
from Bio.PDB.Polypeptide import three_to_one
import numpy as np
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.PDB.Polypeptide import PPBuilder
chainlist=[]
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
parser = MMCIFParser()
self.structure = parser.get_structure(structure_id='7ahl', filename=path) # Parse the structure once and re-use it in the functions below
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
n_chains = 0
for m in self.structure:
for chain in m:
n_chains += 1
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
for m in self.structure:
seq = list()
chain = m[chain_id]
for resid1 in chain:
if is_aa(resid1.get_resname(), standard=True):
seq.append(three_to_one(resid1.get_resname()))
sequence = str("".join(seq))
return sequence
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
n_waters = 0
for m in self.structure:
chain = m[chain_id]
for resid in chain:
if resid.get_id()[0] == 'W':
n_waters += 1
return n_waters
def _translate_id(self, id):
if isinstance(id, int):
id = (' ', id, ' ')
return id
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
ca_coordinates1 = np.array([])
ca_coordinates2 = np.array([])
for model in self.structure:
chain1 = model[chain_id_1]
chain2 = model[chain_id_2]
residue1 = chain1[index_1]
residue2 = chain2[index_2]
for atom1 in residue1:
if atom1.get_name() == 'CA':
ca_coordinates1 = atom1.get_vector()
for atom2 in residue2:
if atom2.get_name() == 'CA':
ca_coordinates2 = atom2.get_vector()
#for atom in residue1:
#print(residue1['CA'].get_vector())
#CA_coordinates1 = np.append(CA_coordinates1,residue1['CA'].get_vector())
#print(CA_coordinates1)
#print(CA_coordinates2)
ca_distance = np.linalg.norm(ca_coordinates1 - ca_coordinates2)
return int(ca_distance)
# 3.11 C-Alpha distance
def get_ca_distance1( self, chain_id_1, id_1, chain_id_2, id_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
            id_1 : id of a residue in a given chain in a Biopython.PDB structure
            chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
            id_2 : id of a residue in a given chain in a Biopython.PDB structure
            chain_id_1 and id_1 describe precisely one residue in a PDB structure,
            chain_id_2 and id_2 describe the second residue.
            Return:
                Return the C-alpha (!) distance between the two residues, described by
                chain_id_1/id_1 and chain_id_2/id_2. Round the returned value via int().
                The reason for using two different chains as an input is that also the distance
                between residues of different chains can be interesting.
                Different chains in a PDB structure can either occur between two different proteins
                (Heterodimers) or between different copies of the same protein (Homodimers).
'''
resid1=0
resid2=0
ca_coordinates1 = []
ca_coordinates2 =[]
for model in self.structure:
chain1 = model[chain_id_1]
chain2 = model[chain_id_2]
i=1
for residue in chain1.child_list:
if i == id_1:
resid1=residue
#chainlist.append(resid1)
break
i+=1
i=1
for residue in chain2.child_list:
if i == id_2:
resid2=residue
break
i+=1
#print("vddvvd",len(resid1))
for atom1 in resid1:
if atom1.name == 'CA':
ca_coordinates1 = atom1.coord
break
for atom2 in resid2:
if atom2.name == 'CA':
ca_coordinates2 = atom2.coord
break
ca_distance = np.linalg.norm(ca_coordinates1 - ca_coordinates2)
return int(ca_distance)
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all resids
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
        chain = []
        for m in self.structure:
            chain = m[chain_id]
        # numpy is already imported at module level
        length = len(chain) - self.get_number_of_water_molecules(chain_id)
        contact_map = np.zeros((length, length), dtype=np.float32)
for i in range(length):
for j in range(length):
contact_map[i][j] = self.get_ca_distance1(chain_id,i+1,chain_id,j+1)
        return contact_map.astype(int)  # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
            Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
            The B-Factors describe the mobility of an atom or a residue.
            In a Biopython.PDB structure B-Factors are given for each atom in a residue.
            Calculate the mean B-Factor for a residue by averaging over the B-Factors
            of all atoms in a residue.
            Sometimes B-Factors are not available for a certain residue
            (e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
protein_seq = self.get_sequence(chain_id)
length = len(protein_seq)
arr = np.zeros(length, dtype=np.float32)
ctr = 0
for m in self.structure:
chain = m[chain_id]
for resid in chain.get_list():
val = 0
val_cnt = 0
insert = False
if is_aa(resid.get_resname(), standard=True):
insert = True
for at in resid:
val_cnt += 1
val += at.get_bfactor()
if insert:
arr[ctr] = val/val_cnt
#print(arr[ctr], ctr)
ctr += 1
        arr = (arr - np.nanmean(arr)) / np.nanstd(arr)  # nan-safe, as the docstring asks
        b_factors = np.array(arr, dtype=np.float32)
        return b_factors.astype(int)  # return rounded (integer) values
def main():
print('PDB parser class.')
p = PDB_Parser('7ahl.cif')
#print(p.get_number_of_chains())
#print(p.get_sequence('A'))
#print(p.get_number_of_water_molecules('A'))
#print(p.get_ca_distance('A',1,'B',1))
#p.get_bfactors('A')
p.get_contact_map('A')
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
self.aa_counts_at_position = []
if not self.are_sequences_valid():
raise TypeError('invalid sequences provided')
self.pre_compute_aa_counts()
def are_sequences_valid(self):
if len(self.sequences) < 1:
return False # wrong number of sequences
length = len(self.sequences[0])
for s in self.sequences:
if len(s) != length:
return False # wrong sequence length
for aa in s:
if aa not in ALPHABET:
return False # invalid character
return True
def pre_compute_aa_counts(self):
# compute aa and gap count for every position --> self.aa_counts_at_position is a list of counters
length = len(self.sequences[0])
for pos in range(length): # for every position
aa_count = Counter() # count aas and gaps at given position
for seq in self.sequences:
aa_count.update(seq[pos])
self.aa_counts_at_position.append(aa_count)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
num_seq, length = self.get_size()
# background frequencies
if bg_matrix is not None:
bg_frequencies = np.sum(bg_matrix, axis=0)
else:
bg_frequencies = np.full(20, 1 / 20) # 0.05 uniform probability for every aa
# weights
if use_sequence_weights:
weights = self.get_sequence_weights()
else:
weights = np.ones(num_seq)
weighted_counts = self.get_weighted_counts(weights)
pssm = weighted_counts[:, :-1] # cut gaps
# redistribute gaps
if redistribute_gaps:
for pos in range(length):
gap_count = weighted_counts[pos][GAP_INDEX]
pssm[pos] += bg_frequencies * gap_count
# Add weighted pseudocounts
if add_pseudocounts:
pseudocounts = np.zeros_like(pssm)
for pos in range(length):
for a in range(20):
g_i_a = 0
for j in range(20):
substitution_frequency = 0.0025 if bg_matrix is None else bg_matrix[j][a]
g_i_a += pssm[pos][j] / bg_frequencies[j] * substitution_frequency
pseudocounts[pos][a] = g_i_a
alpha = self.get_number_of_observations() - 1 # = n - 1
pssm = pssm * alpha + pseudocounts * beta
pssm /= alpha + beta
# normalize by aa counts and background frequencies
aa_count_at_position = np.sum(pssm, axis=1, keepdims=True) # sum aa counts only, exclude gap counts
pssm /= aa_count_at_position # normalize (divide by row sum)
pssm /= bg_frequencies # background frequencies
# keep only rows where primary sequence is not a gap
indices = list(map(lambda aa: AA_TO_INT[aa] != GAP_INDEX, self.sequences[0]))
pssm = pssm[indices]
# scores: 2 * log_2, except entry is 0 (then return -20)
safe_log = np.vectorize(lambda x: np.log2(x) * 2 if x != 0 else -20)
pssm = safe_log(pssm)
return np.rint(pssm).astype(np.int64)
def get_weighted_counts(self, weights):
# position specific aa-and-gap counts weighted by sequence weights
num_seq, length = self.get_size()
weighted_counts = np.zeros((length, 21))
for pos in range(length):
for seq_id in range(num_seq):
seq = self.sequences[seq_id]
aa = seq[pos]
aa_id = AA_TO_INT[aa]
weighted_counts[pos][aa_id] += weights[seq_id]
return weighted_counts
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return len(self.sequences), len(self.sequences[0])
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
num_seq, length = self.get_size()
weights = np.zeros((num_seq, length))
for pos in range(length): # for every position
aa_count = self.aa_counts_at_position[pos]
r = len(aa_count) # number of different aas
if r < 2:
continue # skip positions with the same letter
for s_id in range(num_seq): # get weights
s = aa_count[self.sequences[s_id][pos]]
weights[s_id][pos] = 1 / (r * s)
return np.sum(weights, axis=1).astype(np.float64) # sum weights (position specific -> whole sequence)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_seq, length = self.get_size()
r_sum = 0
for pos in range(length):
aa_count = self.aa_counts_at_position[pos]
r_sum += len(aa_count) # r = number of different aas at position pos
num_obs = r_sum / length
return num_obs
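if __name__ == '__main__':
    # Hedged usage sketch -- made-up toy sequences, not the exercise test data.
    msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
    print(msa.get_size())              # (4, 5)
    print(msa.get_primary_sequence())  # SEAN
    print(msa.get_pssm())              # 4 x 20 int64 log-odds matrix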
<file_sep>##############
# Exercise 2.7
##############
posCharge = ['R','K','H']
negCharge = ['D','E']
arom = ['H','F','W','Y']
polar = ['R','Y','T','S','K','H','E','Q','D','N']
hydrophob = ['A','I','L','W','Y','M','F','V']
sulfur = ['M','C']
acid = ['E','D']
basic = ['R','H','K']
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return aa in posCharge
def isNegativelyCharged(aa):
    return aa in negCharge
def isHydrophobic(aa):
    return aa in hydrophob
def isAromatic(aa):
    return aa in arom
def isPolar(aa):
    return aa in polar
def isProline(aa):
    return aa == 'P'
def containsSulfur(aa):
    return aa in sulfur
def isAcid(aa):
    return aa in acid
def isBasic(aa):
    return aa in basic
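# Quick sanity checks (sketch):
# assert isCharged('R') and isAcid('E') and not isHydrophobic('S')
# assert isAromatic('W') and containsSulfur('M') and isProline('P')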
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
self.__abs_frequencies = {}
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
sequence_len_sum = 0
for seq in self.__sequences:
sequence_len_sum += len(seq)
return ((sequence_len_sum * 1.0)/self.get_counts())
def read_fasta(self, path):
fasta_file = open(path, "r")
fasta_content = fasta_file.readlines()
fasta_file.close()
seq = ""
for line in fasta_content:
if line.startswith(">"):
                if seq != "":  # 'is' compares identity, not string equality
self.__sequences.append(seq)
seq = ""
elif not line.startswith(";"):
seq += line.rstrip("\n\r")
self.__sequences.append(seq)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
for seq in self.__sequences:
for aa in seq:
if aa in self.__abs_frequencies:
self.__abs_frequencies[aa] += 1
else:
self.__abs_frequencies[aa] = 1
return self.__abs_frequencies
def get_av_frequencies(self):
        # return number of occurrences normalized by total length
abs_freqs = self.get_abs_frequencies()
aa_abs_number = 0
for key in abs_freqs.keys():
aa_abs_number += abs_freqs[key]
av_freqs = abs_freqs.copy()
for key in av_freqs.keys():
av_freqs[key] /= aa_abs_number * 1.0
return av_freqs
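# Usage sketch (the FASTA path below is hypothetical):
# dist = AADist("tests/example.fasta")
# print(dist.get_counts(), dist.get_average_length())
# print(dist.get_av_frequencies())  # per-residue frequencies summing to 1.0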
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.header_list = []
self.sequence_list = []
self.read_fasta(filepath)
def get_counts(self):
return (len(self.sequence_list))
def get_average_length(self):
total=0
for i in self.sequence_list:
total += len(i)
return total/float(len(self.sequence_list))
def read_fasta(self, path):
with open(path, 'r') as ff:
sequence=""
flag = 0
for line in ff:
line = line.strip()
if (line.startswith('>') or line.startswith(';')):
self.header_list.append(line)
if (flag == 1):
self.sequence_list.append(sequence)
sequence=""
continue
else:
sequence += line.split('*')[0]
flag = 1
self.sequence_list.append(sequence)
def get_abs_frequencies(self):
all_proteins =''
for i in self.sequence_list:
all_proteins += i
frequency = Counter(all_proteins)
return frequency
def get_av_frequencies(self):
cont = Counter()
cont2 = Counter()
cont3 = Counter()
for i in self.sequence_list:
cont = Counter(i)
for j in cont:
cont2[j] = cont[j]/len(i)
cont3 += cont2
return cont3<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
        sequences for correctness. Pre-calculate any statistics you see fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) < 1:
raise TypeError("At least one sequence.")
if not all(len(s) == len(sequences[0]) for s in sequences):
raise TypeError("Different length.")
if not all(all(aa in ALPHABET for aa in s) for s in sequences):
raise TypeError("Alphabet.")
# transpose sequences
sequences_t = [''.join(s) for s in zip(*sequences)]
# compute r
self.r = np.zeros((len(sequences_t), len(sequences_t[0])))
for i in range(len(sequences_t)):
self.r[i] = len(set(sequences_t[i]))
# compute s
self.s = np.zeros((len(sequences_t), len(sequences_t[0])))
        for i in range(len(sequences_t)):
            for k in range(len(sequences_t[0])):
                self.s[i, k] = sequences_t[i].count(sequences_t[i][k])
self.sequences = sequences
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# uniform background freqs
        # 'bg_matrix or ...' would raise on a numpy array (ambiguous truth value),
        # so test for None explicitly
        bg_matrix = bg_matrix if bg_matrix is not None else 0.0025 * np.ones((20, 20))
bg_freqs = np.ones((len(self.sequences[0]), 20))
for i in range(bg_freqs.shape[0]):
for j in range(bg_freqs.shape[1]):
aa = ALPHABET[j]
bg_freqs[i, j] *= np.sum(bg_matrix[AA_TO_INT[aa]])
# weights
weights = self.get_sequence_weights() if use_sequence_weights else np.ones(len(self.sequences))
# observed freqs with weights
freqs = np.zeros((len(self.sequences[0]), 21))
for i in range(freqs.shape[0]):
for j in range(freqs.shape[1]):
for k in range(len(self.sequences)):
freqs[i, j] += weights[k] if self.sequences[k][i] == INT_TO_AA[j] else 0
# redistribute gaps
gaps = np.repeat(np.expand_dims(freqs[:, -1], axis=1), (freqs.shape[1] - 1), axis=1) if redistribute_gaps else np.zeros((freqs.shape[0], freqs.shape[1] - 1))
pssm = freqs[:, :-1] + gaps * bg_freqs
# add pseudocount
g = np.zeros((len(self.sequences[0]), 20))
for i in range(g.shape[0]):
for a in range(g.shape[1]):
g[i, a] = 0
for j in range(len(bg_matrix)):
g[i, a] += (pssm[i, j] * bg_matrix[j][a])/ bg_freqs[i, j]
N = self.get_number_of_observations()
pssm = pssm if not add_pseudocounts else ((N - 1) * pssm + beta * g) / (N - 1 + beta)
# normalize
pssm = pssm / np.sum(pssm, axis=1, keepdims=True)
# divide
pssm /= bg_freqs
# log-score
pssm = 2 * np.log2(pssm)
pssm[~np.isfinite(pssm)] = -20
# remove row
indices = []
for i in range(pssm.shape[0]):
if self.sequences[0][i] != '-':
indices.append(i)
pssm = pssm[indices]
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.sum((1 / (self.r * self.s)) * (self.r > 1), axis=0)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = np.sum(self.r, axis=0)[0] / self.r.shape[0]
return num_obs.astype(np.float64)
"""
if __name__ == "__main__":
ss = ["SE-AN", "SE-ES", "SEVEN", "SE-AS"]
a = MSA(ss)
print(a.get_size())
print(a.get_primary_sequence())
print(a.get_sequence_weights())
print(a.r)
print(a.get_number_of_observations())
print(a.get_pssm())
"""
<file_sep>import json
import os
import sys
import numpy as np
from tests.matrices import MATRICES
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int64)
        self.direction_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int64)
        # Store from which row and col each local alignment step came
        self.col_index_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int64)
        self.row_index_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int64)
self.align()
print('string1', self.string1)
print('string2', self.string2)
np.set_printoptions(threshold=sys.maxsize)
print(np.array_repr(self.score_matrix).replace('\n', '').replace(']', ']\n'))
print(np.array_repr(self.direction_matrix).replace('\n', ''))
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix[0, :] = 0
self.score_matrix[:, 0] = 0
for row, char2 in enumerate(self.string2):
for col, char1 in enumerate(self.string1):
# Match Score
match_score = self.substitution_matrix[char2][char1] + \
self.score_matrix[row][col] # score-matrix is 1 indexed
# Open Gap in string 2
column_scores = self.score_matrix[:, col + 1]
indices = np.arange(0, len(self.string2)+1)
distance = np.abs(indices - (row+1))
penalty = distance * self.gap_penalty
penalized_column_scores = column_scores + penalty
max_col_score = np.max(penalized_column_scores)
max_col_score_index = np.argmax(penalized_column_scores)
# Open Gap in string 1
row_scores = self.score_matrix[row + 1, :]
indices = np.arange(0, len(self.string1) + 1)
distance = np.abs(indices - (col + 1))
penalty = distance * self.gap_penalty
penalized_row_scores = row_scores + penalty
max_row_score = np.max(penalized_row_scores)
max_row_score_index = np.argmax(penalized_row_scores)
max_score = max(0, match_score, max_col_score, max_row_score)
self.score_matrix[row + 1, col + 1] = max_score
# Up - 001 - 1
# UpLeft - 010 - 2
# Left - 100 - 4
direction_flag = 0
if max_score > 0:
if max_score == match_score:
direction_flag = direction_flag | 2
elif max_score == max_row_score:
direction_flag = direction_flag | 4
self.row_index_matrix[row + 1, col + 1] = max_row_score_index
elif max_score == max_col_score:
direction_flag = direction_flag | 1
self.col_index_matrix[row + 1, col + 1] = max_col_score_index
self.direction_matrix[row+1, col+1] = direction_flag
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return np.max(self.score_matrix) > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
""" Find highest score location """
max_value = np.amax(self.score_matrix)
if max_value == 0:
return '', ''
start_row, start_col = np.where(self.score_matrix == max_value)
start_row = start_row[0]
start_col = start_col[0]
alignment = ('', '')
self.end_string1 = start_col
self.end_string2 = start_row
current_row = start_row
current_col = start_col
del start_col, start_row
while True:
dir = self.direction_matrix[current_row, current_col]
# Up - 001 - 1
# UpLeft - 010 - 2
# Left - 100 - 4
            if dir == 1:
                new_row = self.col_index_matrix[current_row, current_col]
                new_col = current_col  # vertical gap: column stays fixed
                string1_prefix = len(self.string2[new_row:current_row]) * '-'
                string2_prefix = self.string2[new_row:current_row]
            elif dir == 2:
                new_row = current_row - 1
                new_col = current_col - 1
                string1_prefix = self.string1[current_col-1]
                string2_prefix = self.string2[current_row-1]
            elif dir == 4:
                new_col = self.row_index_matrix[current_row, current_col]
                new_row = current_row  # horizontal gap: row stays fixed
                string1_prefix = self.string1[new_col:current_col]
                string2_prefix = len(self.string1[new_col:current_col]) * '-'
else:
break
alignment = (string1_prefix + alignment[0], string2_prefix + alignment[1])
new_score = self.score_matrix[new_row, new_col]
if new_score == 0:
break
current_row = new_row
current_col = new_col
self.start_string1 = current_col
self.start_string2 = current_row
return alignment
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
alignment = self.get_alignment()
residue_index += 1
if string_number == 1:
if self.start_string1 <= residue_index <= self.end_string1:
return True
elif string_number == 2:
if self.start_string2 <= residue_index <= self.end_string2:
return True
return False
if __name__ == '__main__':
relative_path = os.path.dirname(__file__)
with open(os.path.join(relative_path, 'tests/local_test.json')) as json_file:
json_data = json.load(json_file)
short_la = LocalAlignment(
*json_data['short']['strings'],
json_data['short']['gap_penalty'],
MATRICES[json_data['short']['matrix']]
)
short_la.get_alignment()
<file_sep>import numpy as np
import re
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
msa = []
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
        sequences for correctness. Pre-calculate any statistics you see fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError("Given sequence cannott be null")
first_seq_len = len(sequences[0])
for string in sequences:
            if bool(re.match('^['+ALPHABET[::-1]+ALPHABET.lower()+']+$', string)) is not True:  # check that the string only contains valid MSA characters
raise TypeError("Invalid Amino Acid Sequence")
if(len(string) != first_seq_len):
raise TypeError("All the sequence strings must be of same length")
self.msa = sequences
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
primary_sequence = self.get_primary_sequence()
no_cols = 20
no_rows = len(primary_sequence)
pssm = np.zeros((no_rows, no_cols))
        if bg_matrix is None:  # '==' on a numpy array compares elementwise
            bg_matrix = np.zeros((no_rows, no_cols)) + (1 / 20)
        # NOTE: unfinished stub -- the per-position count and log-odds
        # computation that would fill pssm was never implemented here.
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.msa), len(self.msa[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
primary_seq = ""
for seq in self.msa:
if "-" not in seq:
primary_seq = seq
break
return self.msa[0].replace("-","")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
size = self.get_size()
no_of_sequence = size[0]
sequence_len = size[1]
r=np.zeros(sequence_len)
s=np.zeros((no_of_sequence, sequence_len))
# for i in range(0,no_of_sequence):
for i in range(0,sequence_len):
i_string = "".join(self.msa[j][i] for j in range(0,no_of_sequence))
r[i] = len(set(i_string))
for j in range(0,no_of_sequence):
s[j,i] = i_string.count(self.msa[j][i])
unprop_weights = s*r
weights = np.zeros(no_of_sequence)
for i in range(0, no_of_sequence):
w = 0
for j in range(0, sequence_len):
if(r[j] != 1):
                    w += 1 / unprop_weights[i][j]
weights[i] = w
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
size = self.get_size()
no_of_sequence = size[0]
sequence_len = size[1]
r = np.zeros(sequence_len)
for i in range(0,sequence_len):
i_string = "".join(self.msa[j][i] for j in range(0,no_of_sequence))
r[i] = len(set(i_string))
num_obs = r.sum() / sequence_len
return num_obs.astype(np.float64)
# test = MSA([
# "MK-AN",
# "MK-KM",
# "MKTKN",
# "MK-AM",
# ])
#
# print(test.get_sequence_weights())
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
from typing import Dict, Tuple, List
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self) -> int:
# counts the number of sequences parsed from FASTA
return len(self.__sequences)
def total_len(self) -> int:
# returns total length of sequences
total_len = 0
for seq in self.__sequences:
total_len += len(seq)
return total_len
def get_average_length(self):
# returns average length of sequences
total_len = self.total_len()
num_seq = self.get_counts()
return total_len / num_seq
def get_abs_frequencies(self) -> Counter:
        # return number of occurrences, not normalized by length
counters = self.aa_dist()
abs_count = Counter()
for counter in counters:
abs_count = abs_count + counter
return abs_count
def get_av_frequencies(self) -> Counter:
        # return number of occurrences normalized by total length
abs_dict = self.get_abs_frequencies()
avg_dict = Counter()
total_len = self.total_len()
for key in abs_dict:
avg_dict[key] = abs_dict[key] / total_len
return avg_dict
def read_fasta(self, filename: str):
# Reads a FASTA file and saves all containing sequences to a list of sequences in AADist.
with open(filename, "r") as f:
seq_header = ""
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
filtered_seq = seq.replace("*", "")
self.__sequences.append(filtered_seq)
seq = ""
filtered_seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
filtered_seq = seq.replace("*", "")
self.__sequences.append(filtered_seq)
def aa_dist(self) -> List:
# returns a list of sequence distributions
counters = []
for seq in self.__sequences:
counters.append(Counter(seq))
return counters
<file_sep>import numpy as np
import json
from pathlib import Path
from collections import defaultdict
from collections import Counter
import itertools
def json_data():
test_json = 'tests/blast_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
return json_data
def db_sequences(json_data):
return json_data['db_sequences']
def query_seq(json_data):
return json_data['query_seq']
def sub_matrix(json_data):
return np.array(json_data['sub_matrix'], dtype=np.int64)
def query_pssm(json_data):
return np.array(json_data['query_pssm'], dtype=np.int64)
def table_list_tuple(data):
for key, value in data.items():
data[key] = [tuple(x) for x in value]
return data
def blast_hsp_one_hit_1(json_data):
return table_list_tuple(json_data['blast_hsp_one_hit_1'])
def blast_hsp_one_hit_pssm_1(json_data):
return table_list_tuple(json_data['blast_hsp_one_hit_pssm_1'])
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.word_count_in_seq = []
self.words_dict = defaultdict(list)
self.word_counter = Counter()
self.database = []
def add_sequence(self, sequence):
seq = sequence
seq_len = len(seq)
seq_idx = len(self.database)
words = [seq[i:i+3] for i in range(0, seq_len - 2)]
seq_counter = Counter(words)
indices = defaultdict(list)
for idx, word in enumerate(words):
indices[word].append(idx)
for word in indices:
self.words_dict[word].append((seq_idx, indices[word]))
self.word_counter.update(seq_counter)
self.word_count_in_seq.append(len(seq_counter))
self.database.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
indices = self.words_dict[word]
result = []
for seq_idx, word_idxs in indices:
result.append(self.database[seq_idx])
return result
def get_sequences_with_indexes(self, word):
return self.words_dict[word]
def get_db_stats(self):
db_size = len(self.database)
num_words = len(self.word_counter)
        words_per_seq = int(np.rint(np.mean(np.array(self.word_count_in_seq))))
        seq_per_word = int(np.rint(np.mean(np.array([len(array) for array in self.words_dict.values()]))))
return (db_size, num_words, words_per_seq, seq_per_word)
def print_stats(self):
print("##### words_dict #####", len(self.words_dict["DEF"]))
print(self.words_dict["DEF"])
print("##### word_counter #####", len(self.word_counter))
print(self.word_counter)
print("##### word_count_in_seq #####", len(self.word_count_in_seq))
print(self.word_count_in_seq)
print("#####database#####")
print("length:", len(self.database))
class Blast:
def __init__(self, substitution_matrix):
self.sub_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        if pssm is None:
            return list(set(itertools.chain(*self.get_words_subst(sequence, T).values())))
        return list(set(itertools.chain(*self.get_words_pssm(pssm, T).values())))
def get_words_subst(self, sequence, T):
seq = sequence
words = defaultdict(list)
for index in range(0, len(seq) - 2):
aa_0 = seq[index]
aa_1 = seq[index + 1]
aa_2 = seq[index + 2]
word_key = aa_0 + aa_1 + aa_2
for idx, x in enumerate(self.sub_matrix[AA_TO_INT[aa_0]]):
x_plus_y = x + self.sub_matrix[AA_TO_INT[aa_1]]
for idy, sum_x_y in enumerate(x_plus_y):
sum_x_y_z = sum_x_y + self.sub_matrix[AA_TO_INT[aa_2]]
indices = np.where(sum_x_y_z >= T)[0]
for pos in indices:
word = INT_TO_AA[idx] + INT_TO_AA[idy] + INT_TO_AA[pos]
words[word_key].append(word)
return words
def get_words_pssm(self, pssm, threshold):
words = defaultdict(list)
for row_idx in range(0, pssm.shape[0] - 2):
row_0 = pssm[row_idx]
row_1 = pssm[row_idx + 1]
row_2 = pssm[row_idx + 2]
for idx, x in enumerate(row_0):
x_plus_y = x + row_1
for idy, sum_x_y in enumerate(x_plus_y):
sum_x_y_z = sum_x_y + row_2
indices = np.where(sum_x_y_z >= threshold)[0]
for pos in indices:
word = INT_TO_AA[idx] + INT_TO_AA[idy] + INT_TO_AA[pos]
words[str(row_idx)].append(word)
return words
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
result = defaultdict(set)
if pssm is None:
result = self.search_one_hit_subst(blast_db, query=query, T=T, X=X, S=S)
else:
result = self.search_one_hit_pssm(blast_db, pssm, T=T, X=X, S=S)
result_with_list = dict()
for dictItem in result:
result_with_list[dictItem] = list(result[dictItem])
return result_with_list
def search_one_hit_subst(self, blast_db,* ,query=None, T=13, X=5, S=30):
q_seq = query
seq_len = len(q_seq)
result = defaultdict(set)
alternatives = self.get_words_subst(sequence=q_seq, T=T)
for q_s_idx in range(0, seq_len - 2):
q_r_idx = q_s_idx + 3
q_word = q_seq[q_s_idx:q_r_idx]
for alternative in alternatives[q_word]:
seqs = blast_db.get_sequences_with_indexes(alternative)
for target_item in seqs:
t_seq = blast_db.database[target_item[0]]
indices = target_item[1]
for t_s_idx in indices:
hps = self.align(t_seq, t_s_idx, q_s_idx, q_seq=q_seq, X=X)
if hps[3] >= S:
result[t_seq].add(hps)
return result
def search_one_hit_pssm(self, blast_db, pssm, *, T=13, X=5, S=30):
result = defaultdict(set)
alternatives = self.get_words_pssm(pssm, T)
for q_s_idx in range(0, pssm.shape[0]):
q_r_idx = q_s_idx + 3
for alternative in alternatives[str(q_s_idx)]:
seqs = blast_db.get_sequences_with_indexes(alternative)
for target_item in seqs:
t_seq = blast_db.database[target_item[0]]
indices = target_item[1]
for t_s_idx in indices:
hps = self.align(t_seq, t_s_idx, q_s_idx, pssm=pssm, X=5)
if hps[3] >= S:
result[t_seq].add(hps)
return result
def get_pssm_score(self, q_l_idx, q_r_idx, t_word, pssm):
score = 0
for i in range(0, q_r_idx - q_l_idx):
score += pssm[q_l_idx + i][AA_TO_INT[t_word[i]]]
return score
def align(self, t_seq, t_s_idx, q_s_idx, *, q_seq=None, X=5, pssm=None):
# print("q_s_idx: ", q_s_idx)
# print("t_s_idx", t_s_idx)
q_l_idx = q_s_idx
t_l_idx = t_s_idx
q_r_idx = q_s_idx + 3
t_r_idx = t_s_idx + 3
q_word = ""
t_word = t_seq[t_s_idx:t_r_idx]
hps_score = 0
if pssm is not None:
hps_score = self.get_pssm_score(q_l_idx, q_r_idx, t_word, pssm)
else:
q_word = q_seq[q_s_idx:q_r_idx]
hps_score = self.get_score(q_word, t_word)
hps_q_l_idx = q_s_idx
hps_q_s_idx = q_s_idx
hps_q_r_idx = q_r_idx
hps_t_l_idx = t_s_idx
hps_t_s_idx = t_s_idx
hps_t_r_idx = t_r_idx
# if 304 < t_r_idx < 323 or 16 < q_r_idx < 35:
# print("q_r_idx", q_r_idx)
# print("t_r_idx", t_r_idx)
# print("going right")
while q_r_idx <= (len(q_seq) if pssm is None else pssm.shape[0]) and t_r_idx <= len(t_seq):
# q_part = q_seq[q_s_idx:q_r_idx]
t_part = t_seq[t_s_idx:t_r_idx]
score = 0
if pssm is not None:
score = self.get_pssm_score(q_l_idx, q_r_idx, t_part, pssm)
else:
q_part = q_seq[q_s_idx:q_r_idx]
score = self.get_score(q_part, t_part)
# print("score:", score, ";", q_part, "||", t_part)
if score > hps_score:
hps_q_r_idx = q_r_idx
hps_t_r_idx = t_r_idx
hps_score = score
if score <= hps_score - X:
# print("########## len(q_seq)", len(q_seq), "| len(t_seq)", len(t_seq))
# print("q_r_idx", q_r_idx, "| t_r_idx", t_r_idx)
# print("should stop")
break
q_r_idx += 1
t_r_idx += 1
# print("going left")
while q_l_idx >= 0 and t_l_idx >= 0:
# q_part=q_seq[q_l_idx:hps_q_r_idx]
t_part=t_seq[t_l_idx:hps_t_r_idx]
score = 0
if pssm is not None:
score = self.get_pssm_score(q_l_idx, hps_q_r_idx, t_part, pssm)
else:
q_part = q_seq[q_l_idx:hps_q_r_idx]
score = self.get_score(q_part, t_part)
# print("score:", score, ";", q_part, "||", t_part)
if score > hps_score:
hps_q_l_idx = q_l_idx
hps_t_l_idx = t_l_idx
hps_score = score
if score <= hps_score - X:
# print("########## len(q_seq)", len(q_seq), "| len(t_seq)", len(t_seq))
# print("q_r_idx", q_r_idx, "| t_r_idx", t_r_idx)
# print("should stop")
break
q_l_idx -= 1
t_l_idx -= 1
# hps_q = q_seq[hps_q_l_idx:hps_q_r_idx]
hps_t = t_seq[hps_t_l_idx:hps_t_r_idx]
# print("HPS-query:", hps_q)
# print("HPS-target:", hps_t)
hps = (hps_q_l_idx, hps_t_l_idx, len(hps_t), hps_score)
# print(hps)
return hps
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d  # placeholder result; the two-hit search is not implemented here
def get_score(self, q_word, t_word):
score = 0
for i in range(0, len(q_word)):
# print(AA_TO_INT[q_word[i]], AA_TO_INT[t_word[i]])
# print(self.sub_matrix[AA_TO_INT[q_word[i]]][AA_TO_INT[t_word[i]]])
score += self.sub_matrix[AA_TO_INT[q_word[i]]][AA_TO_INT[t_word[i]]]
# print(score)
return score
def main():
print("hello")
sub_m = sub_matrix(json_data())
db = BlastDb()
# db.add_sequence("MGPRARPALFFLILLRTVAA")
# db.add_sequence("MGELMAFLLPLIIVLMVKHS")
#
# q_seq = "MGPRARPAFLLLMLLQTAVL"
blast = Blast(sub_m)
sequences = db_sequences(json_data())
for sequence in sequences:
db.add_sequence(sequence)
q_seq = query_seq(json_data())
pssm = query_pssm(json_data())
# myResult = blast.search_one_hit(db, query=q_seq, T=13, X=5, S=30)
myResult = blast.search_one_hit(db, pssm=pssm, T=13, X=5, S=30)
# print("result:")
# for result in myResult:
# print(result)
# hsps = myResult[result]
# for hsp in hsps:
# print(hsp)
# print("query[", hsp[0], ":", hsp[0]+hsp[2], " ]:", q_seq[hsp[0]:hsp[0]+hsp[2]])
# print("target[", hsp[1], ":", hsp[1]+hsp[2], " ]:", result[hsp[1]:hsp[1] + hsp[2]])
validate(sequences, myResult)
# ##### Returned HPSs are not correct #####
# My_hpss:
# [(83, 333, 15, 36)]
# must
# be:
# [(83, 333, 15, 36), (17, 305, 17, 32)]
# idx in db: 263
# sequence: MADRGGPAEEPSPRGSPRSEPRVPRTAGPSETPRTAALALRFDKPIKQAFYNTGAVLFVCLCCGAAVLVYFILEAFLRPLLWAVLCGTFLHPFKSSLTRLGRLWLRRLHRAHTPIVLAALLLPLCFADYGVEALGEQALRRRRLLLLLGAGGPLLYGLYCLGSYLGVQVLLAHAGALICRGLDYFSSLWIWTLVVGYVLMVSFKWNASTQHYLRAVSIPVWMILLFHIASLAGSWRIPVFLVIVFLMSAGTLYEKQNEKESAGAELPGQVISMAASTLANLAISITGYESSSEDQPSDPSAEPTDKGEPPPAPSASSSSSSR<KEY>TPLS<KEY>GLYTWLTHTVFGINIVFIPSALAAILGAVPFLGTYWAAVPAVLDLWLTQGLGCKAVLLLVFHLLPTYFVDTAIYSDISGGGHPYLTGLAVAGGAYYLGLEGAIIGPILLCILVVASNIYSAMLVSPTNSMPTPSQTPWPAQTQRTFRDISEDLKSSVD
# ----------------------------------------------
def validate(sequences, myResult):
# result_data = blast_hsp_one_hit_1(json_data())
result_data = blast_hsp_one_hit_pssm_1(json_data())
print("from json:", type(result_data))
print(len(myResult), len(result_data), len(myResult) == len(result_data))
print(set(myResult) == set(result_data))
not_found_count = 0
err_count = 0
for result in result_data:
if result not in myResult:
not_found_count += 1
print("##### No HPSs found #####")
print("must be:")
print(result_data[result])
print("idx in db:", sequences.index(result))
print("sequence:", result)
print("----------------------------------------------")
print()
continue
result_hpss = myResult[result]
if result_hpss != result_data[result]:
err_count += 1
print("##### Returned HPSs are not correct #####")
print("My_hpss:")
print(result_hpss)
print("must be:")
print(result_data[result])
print()
print("idx in db:", sequences.index(result))
print("sequence:", result)
print("----------------------------------------------")
print()
print("Number of missing:", not_found_count)
print("Total number of errors:", err_count)
if __name__ == '__main__':
main()<file_sep>import numpy as np
from pathlib import Path
from math import pow
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.words = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
word_set = set([])
for i in range(0, len(sequence)):
if i+2 < len(sequence):
word_set.add(sequence[i:i+3])
else:
break
self.words.append(word_set)
#print("sequence: {0}".format(sequence))
#print("words: {0}".format(self.words))
def get_all_words(self):
word_set_total = set([])
for w in self.words:
word_set_total = word_set_total.union(w)
results = []
word_set_length = len(word_set_total)
for i in range(word_set_length):
results.append(word_set_total.pop())
return results
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
results = []
for s in self.sequences:
if word in s:
results.append(s)
return results
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
a = len(self.sequences)
word_set_total = set([])
for w in self.words:
word_set_total = word_set_total.union(w)
b = len(word_set_total)
length_sum = 0.0
if len(self.words) > 0:
for w in self.words:
length_sum += len(w)
length_sum /= len(self.words)
c = round(length_sum)
word_sum = 0.0
num_of_words = len(word_set_total)
if num_of_words > 0:
for i in range(num_of_words):
w = word_set_total.pop()
word_sum += len(self.get_sequences(w))
word_sum /= num_of_words
d = round(word_sum)
return tuple([a, b, c, d])
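# Worked example (sketch): a database holding only "MGPRA" contains the words
# MGP, GPR and PRA, so get_db_stats() returns (1, 3, 3, 1).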
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
matrix = self.substitution_matrix
word_set = set([])
if sequence:
db = BlastDb()
db.add_sequence(sequence)
words = db.get_all_words()
for w in words:
a = w[0]
index1 = AA_TO_INT[a]
b = w[1]
index2 = AA_TO_INT[b]
c = w[2]
index3 = AA_TO_INT[c]
for x in range(len(matrix[index1])):
for y in range(len(matrix[index2])):
for z in range(len(matrix[index3])):
if matrix[index1][x] + matrix[index2][y] + matrix[index3][z] >= T:
word_set.add(INT_TO_AA[x] + INT_TO_AA[y] + INT_TO_AA[z])
else:
for i in range(0, len(pssm)):
if i+2 < len(pssm):
a = pssm[i]
b = pssm[i+1]
c = pssm[i+2]
for index1 in range(len(a)):
for index2 in range(len(b)):
for index3 in range(len(c)):
if a[index1] + b[index2] + c[index3] >= T:
word_set.add(INT_TO_AA[index1] + INT_TO_AA[index2] + INT_TO_AA[index3])
word_set_length = len(word_set)
results = []
for i in range(word_set_length):
results.append(word_set.pop())
results.sort()
return results
def search_one_hit(self, blast_db, *, sequence=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
matrix = self.substitution_matrix
if sequence:
words = self.get_words(sequence=sequence, T=T)
for w in words:
sequences = blast_db.get_sequences(w)
for s in sequences:
                    try:
                        index = s.index(w)
                        index_sequence = sequence.index(w)
                    except ValueError:
                        break
results = []
a = w[0]
index1 = AA_TO_INT[a]
b = w[1]
index2 = AA_TO_INT[b]
c = w[2]
index3 = AA_TO_INT[c]
default_value = matrix[AA_TO_INT[s[index]]][index1] + matrix[AA_TO_INT[s[index+1]]][index2] + matrix[AA_TO_INT[s[index+2]]][index3]
x = w
value = default_value
max = value
count = 1
for i in range(index+3, len(s)):
if index_sequence+2+count >= len(sequence):
break
y = x + s[i]
tmp = value + matrix[AA_TO_INT[s[i]]][AA_TO_INT[sequence[index_sequence+2+count]]]
if tmp >= S:
results.append((index_sequence, index, len(y), tmp, "right"))
elif tmp >= max - X:
if max < tmp:
max = tmp
value = tmp
x = y
else:
break
count += 1
count = 1
for i in range(index-1, 0, -1):
if index_sequence-count < 0:
break
y = s[i] + x
tmp = value + matrix[AA_TO_INT[s[i]]][AA_TO_INT[sequence[index_sequence-count]]]
if tmp >= S:
results.append((index_sequence-count, i, len(y), tmp, "left"))
elif tmp >= max - X:
if max < tmp:
max = tmp
value = tmp
x = y
else:
break
count += 1
x = w
value = default_value
max = value
left_count = 0
for i in range(index-1, 0, -1):
if index_sequence-left_count-1 < 0:
left_count += 1
break
y = s[i] + x
tmp = value + matrix[AA_TO_INT[s[i]]][AA_TO_INT[sequence[index_sequence-left_count-1]]]
if tmp >= S:
results.append((index_sequence-left_count-1, i, len(y), tmp, "left"))
elif tmp >= max - X:
if max < tmp:
max = tmp
value = tmp
x = y
else:
left_count += 1
break
left_count += 1
count = 1
for i in range(index+3, len(s)):
if index_sequence+2+count >= len(sequence):
break
y = x + s[i]
tmp = value + matrix[AA_TO_INT[s[i]]][AA_TO_INT[sequence[index_sequence+2+count]]]
if tmp >= S:
results.append((index_sequence-left_count, index-left_count, len(y), tmp, "right"))
found = True
elif tmp >= max - X:
if max < tmp:
max = tmp
value = tmp
x = y
else:
break
max_tuple = None
max_value = -1
max_value_length = -1
for r in results:
if r[3] > max_value:
max_tuple = r
max_value = r[3]
max_value_length = r[2]
elif r[3] == max_value and r[2] < max_value_length:
max_tuple = r
max_value = r[3]
max_value_length = r[2]
if max_value > -1:
d[s] = [max_tuple]
#print("dict: {0}".format(d))
return d
#d['SEQWENCE'] = [(1, 2, 4, 13)]
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d  # placeholder; the two-hit search is not implemented here<file_sep>##############
# Exercise 2.7
##############
basic = ['H', 'K', 'R']
acidic = ['D', 'E']
non_polar = ['G', 'A', 'V', 'L', 'I', 'P']
polar = ['R', 'N', 'D', 'Q', 'E', 'H', 'K', 'S', 'T', 'Y']
polar2 = ['Q', 'N', 'H', 'S', 'T', 'Y', 'C']
positive = ['R', 'H', 'K']
negative = ['D', 'E']
hydrophobic = ['A', 'V', 'I', 'L', 'M', 'F', 'Y', 'W']
sulfur = ['C', 'M']
aromatic = ['F', 'W', 'Y', 'H']
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in positive
def isNegativelyCharged(aa):
return aa in negative
def isBasic(aa):
return aa in basic
def isPolar(aa):
return aa in polar
def isAcid(aa):
#return True
return aa in acidic
def isHydrophobic(aa):
return aa in hydrophobic
def isProline(aa):
    return aa == 'P'
def isAromatic(aa):
return aa in aromatic
def containsSulfur(aa):
return aa in sulfur<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
from itertools import groupby
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def __get_total_length(self):
return sum(map(lambda x: len(x[1]), self.__sequences))
def get_average_length(self):
return self.__get_total_length() / self.get_counts()
def read_fasta(self, path):
with open(path) as f:
data_iter = (x[1] for x in groupby(f, lambda l: l[0] == '>'))
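            # groupby splits the file into alternating runs: header lines
            # (starting with '>') and sequence lines, so data_iter yields
            # header-run, sequence-run, header-run, ... in order.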
for elem in data_iter:
header = elem.__next__()[1:].strip()
seq = ''.join(s.strip() for s in data_iter.__next__())
seq = seq.replace('*', '').replace('-', '')
self.__sequences.append((header, seq))
def get_abs_frequencies(self):
s = str.join('', map(lambda x: x[1], self.__sequences))
return Counter(s)
def get_av_frequencies(self):
total = self.__get_total_length()
absolute_freqs = self.get_abs_frequencies()
if total == 0:
return {}
else:
return {
key: (absolute_freqs[key] / total) for key in absolute_freqs
}
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def add_sequence(self, seq):
seq = seq.replace("*", "")
self.__sequences.append(seq)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
total_length = 0
for sequence in self.__sequences:
total_length += len(sequence)
return total_length / AADist.get_counts(self)
def read_fasta(self, filepath):
with open(filepath, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
AADist.add_sequence(self, seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
AADist.add_sequence(self, seq)
return self.__sequences
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
        # All 20 standard amino acids start at zero (same key order as before).
        dictionary = dict.fromkeys("ARNDCEQGHILKMFPSTWYV", 0)
        for sequence in self.__sequences:
            for a in sequence:
                if a in dictionary:
                    dictionary[a] += 1
def get_av_frequencies(self):
        # return number of occurrences normalized by total length
dictionary = AADist.get_abs_frequencies(self)
total_length = 0
for sequence in self.__sequences:
total_length += len(sequence)
for key in dictionary:
dictionary[key] = dictionary[key] / total_length
return dictionary<file_sep>def get_orfs(dnsseq: str):
    # only accept A, T, C and G
    for i in dnsseq:
        if i not in "ATCG":
            raise TypeError("Input is not a DNA sequence")
beginning = 0
stopcodon = ["TAA", "TAG", "TGA"]
intcodon = "ATG"
list = []
counter = 0
while beginning <= 3:
for i in range (beginning, len(dnsseq)-1):
if dnsseq[i] == "A" and dnsseq[i+1] == "T" and dnsseq[i+2] == "G":
counter = i
if (dnsseq[i+4] + dnsseq[i+5] + dnsseq[i+6]) not in stopcodon:
intcodon = intcodon + dnsseq[i+5]
continue
intcodon = intcodon + dnsseq[i+4] + dnsseq[i+5] + dnsseq[i+6]
if len(intcodon) > 33:
tuple = (i, len(intcodon) - i, intcodon, True)
list.append(tuple)
beginning += 1
#complementary of the string
s =""
for i in range(0, len(dnsseq)):
if dnsseq[i] == "T":
s = s + "A"
elif dnsseq[i] == "A":
s = s + "T"
elif dnsseq[i] == "C":
s = s + "G"
elif dnsseq[i] == "G":
s = s + "C"
    # reverse the complement so that 'rev' holds the reverse-complement strand
    rev = ""
    length = len(s) - 1
    while length >= 0:
        rev = rev + s[length]
        length -= 1
beginningrev = 0
while beginningrev <= 3:
for i in range (beginningrev, len(rev)-1):
if rev[i] == "A" and rev[i+1] == "T" and rev[i+2] == "G":
counter = i
if (rev[i+5] + rev[i+6] + rev[i+7]) not in stopcodon:
intcodon = intcodon + rev[i+5]
continue
intcodon = intcodon + rev[i+5] + rev[i+6] + rev[i+7]
if len(intcodon) > 33:
tuple = (i, len(intcodon) - i, intcodon, False)
list.append(tuple)
beginningrev += 1
return list
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
    'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
# Tip: This module might be useful for parsing...
from Bio import SeqIO
import xml.etree.ElementTree as ET
############ Exercise 3: SwissProt ##########
class SwissProt_Parser:
PARSER = SeqIO
# Root of the XML tree
sp_anno = None
sp_anno2 = None
def __init__( self, path, frmt='uniprot-xml' ):
'''
Initialize every SwissProt_Parser with a path to a XML-formatted UniProt file.
An example file is included in the repository (P09616.xml).
Tip: Store the parsed XML entry in an object variable instead of parsing it
again & again ...
'''
self.path = path
tree = ET.parse(path)
self.sp_anno = tree.getroot() # Parse the XML file once and re-use it in the functions below
self.sp_anno2 = SeqIO.parse(path, "uniprot-xml")
# 3.2 SwissProt Identifiers
def get_sp_identifier( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Unique SwissProt identifier for the given xml file
'''
entry = self.sp_anno.find('{http://uniprot.org/uniprot}entry')
print(entry.tag, entry.attrib)
accession = entry.find('{http://uniprot.org/uniprot}accession')
print(accession.text)
identifier = accession.text
return identifier
# 3.3 SwissProt Sequence length
def get_sp_sequence_length( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return sequence length of the UniProt entry as an integer.
'''
entry = self.sp_anno.find('{http://uniprot.org/uniprot}entry')
sequence = entry.find('{http://uniprot.org/uniprot}sequence')
seq_len = int(sequence.attrib['length'])
return seq_len
# 3.4 Organism
def get_organism( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
            Return the name of the organism as stated in the corresponding field
of the XML data. Return value has to be a string.
'''
        # self.sp_anno2 is a generator and is exhausted after one pass, so
        # parse a fresh record on every call.
        record = next(SeqIO.parse(self.path, "uniprot-xml"))
        organism = record.annotations['organism']
# entry = self.sp_anno.find('''{http://uniprot.org/uniprot}entry''')
# # something = self.sp_anno.annotation['organism']
# print (entry)
# organism_tag = entry.find('''{http://uniprot.org/uniprot}organism''')
# organism = organism_tag.find('''{http://uniprot.org/uniprot}name''')
# print (organism)
return organism
# 3.5 Localizations
def get_localization( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return the name of the subcellular localization as stated in the
corresponding field.
Return value has to be a list of strings.
'''
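        # Expected XML shape (sketch):
        #   <comment type="subcellular location">
        #     <subcellularLocation><location>...</location></subcellularLocation>
        #   </comment>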
localization = []
entry = self.sp_anno.find('{http://uniprot.org/uniprot}entry')
comments = entry.findall('{http://uniprot.org/uniprot}comment')
for comment in comments:
if comment.attrib['type'] == "subcellular location":
subCellLoc = comment.find('{http://uniprot.org/uniprot}subcellularLocation')
locations = subCellLoc.findall('{http://uniprot.org/uniprot}location')
for location in locations:
localization.append(location.text)
break
return localization
# 3.6 Cross-references to PDB
def get_pdb_support( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Returns a list of all PDB IDs which support the annotation of the
given SwissProt XML file. Return the PDB IDs as list.
'''
pdb_ids = []
entry = self.sp_anno.find('{http://uniprot.org/uniprot}entry')
dbReferences = entry.findall('{http://uniprot.org/uniprot}dbReference')
for dbReference in dbReferences:
if dbReference.attrib['type'] == "PDB":
pdb_ids.append(dbReference.attrib['id'])
# citation = reference.find('{http://uniprot.org/uniprot}citation')
# dbReference = citation.find('{http://uniprot.org/uniprot}dbReference')
# if (dbReference != None):
# pdb_ids.append(dbReference.attrib['id'])
return pdb_ids
def main():
# print('SwissProt XML Parser class')
# sw_par = SwissProt_Parser("/home/iam/Documents/TUM/3rd semester/protein_prediction/exercises/pp1ss19exercise2-exercise-ge29lel/tests/P09616.xml")
# print(sw_par.get_organism())
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import ex1
def complementary(string):
    ret = ''
for i in string:
if i=='A' :
ret+='T'
elif i=='T':
ret+='A'
elif i=='G':
ret+='C'
elif i=='C':
ret+='G'
return ret
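# Quick sanity check for complementary(); a minimal hedged sketch, not part of
# the exercise solution (running this file directly also requires ex1.py).
if __name__ == '__main__':
    assert complementary('ATGC') == 'TACG'
    assert complementary(complementary('GATTACA')) == 'GATTACA'  # involution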
def get_start_codons(genome, p):
#Finding all Start Codons
#First Frame
pos = []
for i in range(p,len(genome)-1,3):
if genome[i] == 'A' and genome[i+1] == "T" and genome[i+2] == "G":
            pos.append(i)  # record every start codon position
return pos
def get_stop_codons(genome, p):
    #Get the positions of each beginning of stop codons
    starts = get_start_codons(genome, p)
    if not starts:
        return []  # no start codon in this frame, hence no ORF to close
    pos = []
    n = len(genome)
    # Scan past the end of the (circular) genome up to the last start codon;
    # modular indexing handles the wrap-around uniformly and avoids the
    # off-by-one index error near the genome boundary.
    for i in range(p, n + starts[-1], 3):
        codon = genome[i % n] + genome[(i + 1) % n] + genome[(i + 2) % n]
        if codon in ('TAA', 'TAG', 'TGA'):
            pos.append(i)
    return pos
def keep_useful_seq(genome, pos, posstops):
#1.The genome itself, 2. The position of the start codons 3.The position of every stop codon for that frame
Fpos = []
for i in range(1, len(posstops),1):
provstops=False
for j in range(0, len(pos),1):
if provstops is False :
if(pos[j]<posstops[i] and posstops[i-1]<pos[j]):
Fpos.append([pos[j], posstops[i]])
provstops=True
    # Handle the case where the last start codon lies after the last stop codon (wrap-around).
bigger = False
for i in range(len(pos)):
if bigger is False :
if pos[i]>posstops[len(posstops)-1]:
bigger=True
if posstops[0] < pos[0]:
Fpos.append([pos[i],posstops[0]])
else :
Fpos[0][0]=pos[i]
return Fpos #List of lists containing beginning and end of ORF
def are_Long_Enough(genome, pos, direction):
res = []
for i in range(0, len(pos)):
if direction is False and pos[i][0] > pos[i][1]+2 :
if abs((len(genome)-(pos[i][0]) + (pos[i][1]+2)))/3 >= 34:
res.append(pos[i])
elif direction is True and pos[i][1] < pos[i][0]+2 :
if abs((pos[i][1]) - (pos[i][0]+2))/3 >= 34:
res.append(pos[i])
elif abs((pos[i][1]+2 - pos[i][0]))/3 >= 34:
res.append(pos[i])
return res
def tuple_res(genome, lolist, direction):
res = []
if direction :
n = len(genome)
for i in range(len(lolist)):
if(lolist[i][0] > lolist[i][1]+2) :
chaine = genome[n-lolist[i][1]-3:len(genome)-1] + genome[0:n-lolist[i][0]-1]
res.append((n-lolist[i][0]-1, n-lolist[i][1]-3, ex1.codons_to_aa(chaine), direction))
else :
res.append((n-lolist[i][0]-1, n-lolist[i][1]-3, ex1.codons_to_aa(genome[lolist[i][0]:lolist[i][1]]), direction))
return res
else :
for i in range(len(lolist)):
if lolist[i][1]+2 > len(genome) :
res.append((lolist[i][0], lolist[i][1]+2-len(genome), ex1.codons_to_aa(genome[lolist[i][0]:len(genome)] + genome[0:(lolist[i][1]-len(genome))]), direction))
else :
res.append((lolist[i][0], lolist[i][1]+2, ex1.codons_to_aa(genome[lolist[i][0]:lolist[i][1]]), direction))
return res
def get_orfs(genome):
for i in range(0, len(genome)) :
I = genome[i]
if not (I == "A" or I == "T" or I== "G" or I == "C"):
raise TypeError
orfs = []
complement = complementary(genome[::-1])
sf1gen = tuple_res(genome,are_Long_Enough(genome,keep_useful_seq(genome, get_start_codons(genome, 0), get_stop_codons(genome, 0)), False), False)
sf2gen = tuple_res(genome,are_Long_Enough(genome,keep_useful_seq(genome, get_start_codons(genome, 1), get_stop_codons(genome, 1)), False), False)
sf3gen = tuple_res(genome,are_Long_Enough(genome,keep_useful_seq(genome, get_start_codons(genome, 2), get_stop_codons(genome, 2)), False), False)
sf1rev = tuple_res(complement,are_Long_Enough(complement,keep_useful_seq(complement, get_start_codons(complement, 0), get_stop_codons(complement, 0)), True), True)
sf2rev = tuple_res(complement,are_Long_Enough(complement,keep_useful_seq(complement, get_start_codons(complement, 1), get_stop_codons(complement, 1)), True), True)
sf3rev = tuple_res(complement,are_Long_Enough(complement,keep_useful_seq(complement, get_start_codons(complement, 2), get_stop_codons(complement, 2)), True), True)
    orfs = list(tuple(sf1gen) + tuple(sf2gen) + tuple(sf3gen) + tuple(sf1rev) + tuple(sf2rev) + tuple(sf3rev))
return orfs<file_sep>##############
# Exercise 2.5
##############
from main import codon_dict
import re
# You can use the supplied test cases for your own testing. Good luck!
# define data dictionary for DNA nucleobase complement
complement_parameters = {
"A": "T",
"T": "A",
"C": "G",
"G": "C"
}
# complementary function
def complementary(input_string):
# initiate the resulting complementary sting as an empty string
complement_string = ""
# for each input char find complementary char from the data dictionary
for input_char in input_string:
        complement_string += complement_parameters[input_char]  # append the complementary base
# return the resulting complementary DNA nucleobase string
return complement_string
def get_orfs(genome):
genome = genome.replace(" ", "")
genome = genome.upper() #basic trimming
orfs = [] #initiate return variable
    if not re.match('^[TCAG]+$', genome):  # check if genome is a valid DNA sequence (already upper-cased)
        raise TypeError("Invalid DNA Sequence")
else:
sequence_length = len(genome) #find genome length
init_index = 0
loop_control = 0
for i in range(0, 3): #for 3 reading frames
string2 = ""
for j in range(i, sequence_length - 2, 3): #iterate string from i to end with step 3
string3 = ""
string3 = genome[j] + genome[j + 1] + genome[j + 2] #join nucleotide to form codon
string3 = codon_dict[string3] #convert codons to amino acid,
if string3 == "Z": #work for end codon
loop_control = 0
final_index = j + 2
if len(string2) > 33:
orfs.append(tuple((init_index, final_index, string2, False))) #add to orf list if string greater than 33
string2 = ""
j = j + 1
if loop_control == 1: #work from start to end
string2 = string2 + string3
if string3 == "M" and loop_control == 0: #check for start codons
init_index = j
loop_control = 1
string2 = string3
reversed_dna = genome[::-1] #reverse the dna sequence
reversed_dna = complementary(reversed_dna) #complement the reversed dna
#similar workout for reversed complementary dna
for i in range(0, 3):
string2 = ""
for j in range(i, sequence_length - 2, 3):
string3 = ""
string3 = reversed_dna[j] + reversed_dna[j + 1] + reversed_dna[j + 2]
string3 = codon_dict[string3]
if string3 == "Z":
loop_control = 0
final_index = j + 2
if len(string2) > 33:
init_index = len(reversed_dna) - init_index - 1
final_index = len(reversed_dna) - final_index - 1
orfs.append(tuple((init_index, final_index, string2, True)))
string2 = ""
j = j + 1
if loop_control == 1:
string2 = string2 + string3
if string3 == "M" and loop_control == 0:
init_index = j
loop_control = 1
string2 = string3
return orfs
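# Hedged smoke test; assumes codon_dict (imported from main above) maps codons
# to one-letter amino acids with 'Z' for the stop codons, as the code expects.
# The toy genome is an assumption: one 41-residue ORF on the forward strand.
if __name__ == '__main__':
    toy = 'TAA' + 'ATG' + 'GCT' * 40 + 'TAA'
    print(get_orfs(toy))  # expected: [(3, 128, 'M' + 'A' * 40, False)]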
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
        row_count = len(sequences)
        if row_count < 1:
            raise TypeError("MSA Sequence does not contain at least one sequence!")
        length = len(sequences[0])
        for i in range(1, row_count):
            if len(sequences[i]) != length:
                raise TypeError("MSA Sequences do not have the same length!")
        for seq in sequences:
            for c in seq:
                if c not in ALPHABET:
                    raise TypeError("MSA Sequences contain invalid character(s)!")
        self.msa_seq = sequences
        self.row_count = row_count
        self.col_count = length
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
        # Placeholder: the refinements described above are not implemented here.
        # The shape follows the docstring (L x 20, L = ungapped primary length).
        pssm = np.zeros((len(self.get_primary_sequence()), 20))
        return np.rint(pssm).astype(np.int64)
def get_size(self):
return (self.row_count, self.col_count)
def get_primary_sequence(self):
seq=self.msa_seq[0]
ret_seq=''.join( c for c in seq if c not in '-' )
return ret_seq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
        # Position-based (Henikoff) sequence weights: each sequence collects
        # 1 / (r * s) per column, where r is the number of distinct characters
        # in the column and s the count of the sequence's own character there.
        weights = np.zeros(self.row_count)
        for col in range(self.col_count):
            column = [seq[col] for seq in self.msa_seq]
            counts = {}
            for aa in column:
                counts[aa] = counts.get(aa, 0) + 1
            r = len(counts)
            if r > 1:  # columns with a single character type contribute nothing
                for row in range(self.row_count):
                    weights[row] += 1.0 / (r * counts[column[row]])
        return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        # Estimate: average number of distinct characters per column.
        r_sum = 0
        for col in range(self.col_count):
            r_sum += len(set(seq[col] for seq in self.msa_seq))
        return np.float64(r_sum) / self.col_count
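# Hedged usage sketch (not part of the solution): exercises the weight
# calculation above on a tiny made-up alignment. The sequences are assumptions
# chosen so every character is in ALPHABET and all rows have equal length.
if __name__ == '__main__':
    msa = MSA(['MKLV-', 'MKIVA', 'MRLVA'])
    print(msa.get_size())                    # (3, 5)
    print(msa.get_primary_sequence())        # 'MKLV'
    print(msa.get_sequence_weights())        # [1.0, 1.0, 1.0] for this symmetric toy
    print(msa.get_number_of_observations())  # (1+2+2+1+2)/5 = 1.6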
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.PDB.Polypeptide import PPBuilder
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
self.structure = MMCIFParser().get_structure('STR', path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
model = self.structure[0]
return len(model)
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
model = self.structure[0]
ppb = PPBuilder()
for seq in ppb.build_peptides(model[chain_id]):
return seq.get_sequence()
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
chain = self.structure[0][chain_id]
        count = 0
        for res in chain:
            # NB: the previous ternary expression reset count to 0 on every
            # non-water residue; count water molecules explicitly instead.
            if res.get_resname() == 'HOH':
                count += 1
        return count
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
try:
res1 = self.structure[0][chain_id_1][index_1]
res2 = self.structure[0][chain_id_2][index_2]
except Exception:
return 0
return int(res1['CA'] - res2['CA'])
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
chain = self.structure[0][chain_id]
i = 1
count = 0
new_chain = []
for res in chain:
try:
res['CA']
new_chain.append(res)
count += 1
except KeyError:
pass
i += 1
length = len(new_chain)
contact_map = np.zeros((length, length), dtype=np.float32)
i = 0
while i < length:
j = 0
while j < length:
res1 = new_chain[i]
res2 = new_chain[j]
contact_map[i][j] = res1['CA'] - res2['CA']
j += 1
i += 1
        return contact_map.astype(int)  # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
chain = self.structure[0][chain_id]
i = 1
new_chain = []
for res in chain:
try:
res['CA']
new_chain.append(res)
except KeyError:
pass
length = len(new_chain)
b_factors = np.zeros(length, dtype=np.float32)
i = 0
for residue in new_chain:
            count = 0
            total = 0.0  # avoid shadowing the built-in sum()
            for a in residue.get_atoms():
                try:
                    total += a.bfactor
                    count += 1
                except Exception:
                    pass
            try:
                b_factors[i] = total / count
            except Exception:
                b_factors[i] = np.nan
i += 1
b_factors = b_factors - np.nanmean(b_factors)
b_factors = b_factors / np.nanstd(b_factors)
        return b_factors.astype(int)  # return rounded (integer) values
def main():
PDB_Parser('tests/7ahl.cif').get_bfactors('A')
return None
if __name__ == '__main__':
main()<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.trace = {}
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
# Initialize
self.score_matrix[0][0] = 0
for row in range(len(self.string2) + 1):
self.score_matrix[row][0] = 0
for col in range(len(self.string1) + 1):
self.score_matrix[0][col] = 0
for row in range(1, len(self.string2) + 1):
for col in range(1, len(self.string1) +1):
diag = self.score_matrix[row-1][col-1] + self.substitution_matrix[self.string2[row-1]][self.string1[col-1]]
up = self.score_matrix[row-1][col] + self.gap_penalty
left = self.score_matrix[row][col-1] + self.gap_penalty
max_index = np.argmax([diag, up, left, 0])
self.score_matrix[row][col] = [diag, up, left, 0][max_index]
if max_index == 0:
self.trace[(row, col)] = "d"
elif max_index == 1:
self.trace[(row, col)] = "v" # vertical
elif max_index == 2:
self.trace[(row, col)] = "h" # horizontal
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return np.max(self.score_matrix) != 0
def search_local_alignment(self, row, col):
alignment = ["",""]
while self.score_matrix[row][col] != 0:
t = self.trace[(row, col)]
if t == "d":
alignment[0] = self.string1[col-1] + alignment[0]
alignment[1] = self.string2[row-1] + alignment[1]
row -= 1
col -= 1
elif t == "v":
alignment[0] = "-" + alignment[0]
alignment[1] = self.string2[row-1] + alignment[1]
row -= 1
elif t == "h":
alignment[0] = self.string1[col-1] + alignment[0]
alignment[1] = "-" + alignment[1]
col -= 1
return tuple(alignment)
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
max_value = np.max(self.score_matrix)
self.row_indices, self.col_indices = np.where(self.score_matrix == max_value)
alignments = []
for index in range(len(self.row_indices)):
align = self.search_local_alignment(self.row_indices[index], self.col_indices[index])
if align != ("",""):
alignments.append(align)
if alignments == []:
alignments = ["",""]
#print ("string1: {}, string2: {}".format(self.string1, self.string2))
#print (self.score_matrix)
#print (self.trace)
#print (alignments)
if len(alignments) > 1:
alignments = tuple(alignments)
elif len(alignments):
alignments = alignments[0]
return alignments
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
# Get all local alignments
        alignments = self.get_alignment()
i = 0
if string_number == 1:
string = self.string1
align = alignments[0]
indices = self.col_indices
else:
string = self.string2
align = alignments[1]
indices = self.row_indices
upper = indices[0]
lower = upper - len(align)
        return lower <= residue_index <= upper
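# Hedged usage sketch: a tiny Smith-Waterman run. The 3-letter substitution
# matrix below is an assumption made for this demo; the exercise normally
# supplies a real matrix such as BLOSUM62.
if __name__ == '__main__':
    letters = 'ABC'
    toy_matrix = {a: {b: (3 if a == b else -1) for b in letters} for a in letters}
    la = LocalAlignment('ABCB', 'BCB', -2, toy_matrix)
    print(la.has_alignment())   # True
    print(la.get_alignment())   # ('BCB', 'BCB')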
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser(QUIET = True) # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
self.aa = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
self.structure = None # Parse the structure once and re-use it in the functions below
self.mmcif_dict = None
self.parse_data(path)
self.model = self.structure.get_models()
self.models = list(self.model)
self.chains = list(self.models[0].get_chains())
self.dehydrated_residues = {}
self.sequences = {}
self.water = {}
def parse_data(self, path):
self.structure = self.CIF_PARSER.get_structure("7AHL",path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
n_chains = len(self.chains)
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
if chain_id in self.sequences:
return self.sequences[chain_id]
chain_num = ord(chain_id) - 65
residues = list(self.chains[chain_num].get_residues())
sequence = ""
dehydrated = []
count = 0
for el in residues:
if el.resname == "HOH":
count+=1
else:
sequence += self.aa[el.resname]
if "CA" in el:
dehydrated.append(el)
self.water[chain_id] = count
self.sequences[chain_id] = sequence
self.dehydrated_residues[chain_id] = dehydrated
return sequence
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
if chain_id not in self.water:
self.get_sequence(chain_id)
n_waters = self.water[chain_id]
return n_waters
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
self.get_sequence(chain_id_1)
self.get_sequence(chain_id_2)
residues1 = self.dehydrated_residues[chain_id_1]
residues2 = self.dehydrated_residues[chain_id_2]
#ca_distance = residues1[index_1]['CA'] - residues2[index_2]['CA']
ca_distance = self.structure[0][chain_id_1][index_1]["CA"] - self.structure[0][chain_id_2][index_2]["CA"]
return int( ca_distance )
def get_ca_distance_aux( self, chain_id_1, index_1, chain_id_2, index_2 ):
self.get_sequence(chain_id_1)
self.get_sequence(chain_id_2)
residues1 = self.dehydrated_residues[chain_id_1]
residues2 = self.dehydrated_residues[chain_id_2]
ca_distance = residues1[index_1]['CA'] - residues2[index_2]['CA']
return int( ca_distance )
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
self.get_sequence(chain_id)
length = len(self.dehydrated_residues[chain_id])
contact_map = np.zeros( (length,length), dtype=np.float32 )
for idx1 in range(length):
for idx2 in range(length):
contact_map[idx1,idx2] = self.get_ca_distance_aux(chain_id, idx1, chain_id, idx2)
        return contact_map.astype( int ) # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
length = len(self.get_sequence(chain_id))
b_factors = np.zeros( length, dtype=np.float32 )
chain_num = ord(chain_id) - 65
residues = list(self.chains[chain_num].get_residues())
for idx1 in range(length):
atoms = list(residues[idx1].get_atoms())
array1 = np.zeros( len(atoms), dtype=np.float32 )
            for idx2 in range(len(atoms)):
                bf = atoms[idx2].get_bfactor()
                # a B-factor of 0.0 is valid; only a missing value becomes NaN
                array1[idx2] = np.nan if bf is None else bf
b_factors[idx1] = np.nanmean(array1)
b_factors-=np.nanmean(b_factors)
b_factors/=np.nanstd(b_factors)
        return b_factors.astype( int ) # return rounded (integer) values
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()
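# Hedged usage sketch: exercises the parser on the example structure shipped
# with the repository ('tests/7ahl.cif', the file referenced in the class
# docstring); adjust the path if the file lives elsewhere.
if __name__ == '__main__':
    parser = PDB_Parser('tests/7ahl.cif')
    print(parser.get_number_of_chains())
    print(parser.get_sequence('A')[:10])             # first 10 residues of chain A
    print(parser.get_number_of_water_molecules('A'))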
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome):
pass<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
pass
def get_average_length(self):
pass
def read_fasta(self, path):
pass
def get_abs_frequencies(self):
# return number of occurences not normalized by length
pass
def get_av_frequencies(self):
# return number of occurences normalized by length
pass
<file_sep>##############
# Exercise 2.7
##############
# Amino acid property notes (matching the sets below):
# sulfur-containing: Methionine (M), Cysteine (C)
# basic: Lysine (K), Arginine (R), Histidine (H)
# acidic: Aspartate (D), Glutamate (E)
# proline: Proline (P)
# aromatic: Phenylalanine (F), Tryptophan (W), Tyrosine (Y), Histidine (H)
# hydrophobic: Alanine (A), Isoleucine (I), Leucine (L), Methionine (M),
#              Phenylalanine (F), Valine (V), Tyrosine (Y), Tryptophan (W)
# polar: Glutamine (Q), Asparagine (N), Histidine (H), Serine (S),
#        Threonine (T), Tyrosine (Y), Glutamate (E), Aspartate (D),
#        Arginine (R), Lysine (K)
postiv = {"K", "R", "H"}
negativ = {"D","E"}
aromatic = {"F", "W", "Y", "H"}
basic = {"K", "R", "H"}
acid = {"D", "E"}
polar = {"N","Q","S","T","Y","E","D","R","H","K"}
hyrdo = {"W", "I", "L", "M", "F", "V", "Y","A"}
prolein = {"P"}
sulfur = {'M', 'C'}
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in postiv
def isNegativelyCharged(aa):
return aa in negativ
def isHydrophobic(aa):
return aa in hyrdo
def isAromatic(aa):
return aa in aromatic
def isPolar(aa):
return aa in polar
def isProline(aa):
return aa in prolein
def containsSulfur(aa):
return aa in sulfur
def isAcid(aa):
return aa in acid
def isBasic(aa):
return aa in basic
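# Quick self-checks for the classification helpers above; a minimal hedged
# sketch using textbook amino acid properties.
if __name__ == '__main__':
    assert isPositivelyCharged('K') and isNegativelyCharged('D')
    assert isAromatic('W') and not isAromatic('G')
    assert containsSulfur('C') and isProline('P')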
<file_sep>##############
# Exercise 2.7
##############
props = {
'A': ['no-polar', 'hydrophobic'],
'R': ['+charge', 'polar', 'hydrophilic', 'basic'],
'N': ['polar', 'hydrophilic', 'no-charge'],
'D': ['-charge', 'polar', 'hydrophilic', 'acid'],
'C': ['no-charge', 'no-polar', 'hydrophilic', 'sulfur'],
'E': ['-charge','polar', 'hydrophilic', 'acid'],
'Q': ['no-charge', 'polar', 'hydrophilic'],
'G': ['no-charge', 'no-polar', 'hydrophilic'],
'H': ['+charge','polar', 'hydrophilic', 'aromatic', 'basic'],
'I': ['no-polar', 'hydrophobic', 'aliphatic'],
'L': ['no-polar', 'hydrophobic', 'aliphatic'],
'K': ['+charge', 'polar', 'hydrophilic', 'basic'],
'M': ['no-polar', 'hydrophobic', 'sulfur', 'aliphatic'],
'F': ['no-polar', 'hydrophobic', 'aromatic'],
'P': ['no-polar'],
'S': ['no-charge', 'polar', 'hydrophilic'],
'T': ['no-charge', 'polar', 'hydrophilic'],
    'W': ['no-charge', 'no-polar', 'hydrophobic', 'aromatic'],
'Y': ['no-charge', 'polar', 'hydrophobic', 'aromatic'],
'V': ['no-polar','hydrophobic', 'aliphatic']
}
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return "+charge" in props[aa.upper()]
def isNegativelyCharged(aa):
    return "-charge" in props[aa.upper()]
def isHydrophobic(aa):
    return "hydrophobic" in props[aa.upper()]
def isAromatic(aa):
    return "aromatic" in props[aa.upper()]
def isPolar(aa):
    return "polar" in props[aa.upper()]
def isProline(aa):
    return aa.upper() == 'P'
def containsSulfur(aa):
    return "sulfur" in props[aa.upper()]
def isAcid(aa):
    return "acid" in props[aa.upper()]
def isBasic(aa):
    return "basic" in props[aa.upper()]
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
# Some "private" helper attributes
self._cols = len(string1) + 1
self._rows = len(string2) + 1
self._traceback_matrix = np.array([[None] * self._cols for i in range(self._rows)])
self._alignments = []
# Some "private" helper attributes
self.align()
def fill_matrices(self):
"""
Fill in each cell of the score matrix according to Needleman-Wunch algorithn,
and create the corresponding traceback matrix
"""
score_top_left = 0 # Score value for a diagonal step
score_top = 0 # Score value for an up step
score_left = 0 # Score value for a left step
for row in range(self._rows):
self.score_matrix[row, 0] = row * self.gap_penalty
self._traceback_matrix[row, 0] = "ˆ" # step up
for col in range(self._cols):
self.score_matrix[0, col] = col * self.gap_penalty
self._traceback_matrix[0, col] = "<" # step left
for row in range(1, self._rows, 1):
for col in range(1, self._cols, 1):
# Needleman-Wunsch formula calculations
# Diagonal score
                score_top_left = self.score_matrix[row - 1, col - 1] + self.substitution_matrix[self.string2[row-1]][self.string1[col-1]]
# Upper score
score_top = self.score_matrix[row - 1, col] + self.gap_penalty
# Left score
score_left = self.score_matrix[row, col - 1] + self.gap_penalty
# Cell's score
score = max(score_top_left, score_top, score_left)
self.score_matrix[row, col] = score
# Store step taken in traceback matrix
steps = []
if (score == score_top_left):
steps.append("\\")
if (score == score_top):
steps.append("ˆ")
if (score == score_left):
steps.append("<")
self._traceback_matrix[row, col] = steps
def __get_alignments(self):
"""
Get all the optimal alignments
"""
steps_stack = []
row = self._rows - 1
col = self._cols - 1
alignment_a = []
alignment_b = []
score = 0
while True:
# We reached the top left corner in the matrix, so we end the alignment
if (row, col) == (0, 0):
self._alignments.append((score, ("".join(alignment_b), "".join(alignment_a))))
if (steps_stack == []): # End of alignment
break
else: # Perform step back in the matrix
(row, col, steps, alignment_a, alignment_b) = steps_stack.pop()
score = 0
else: # Where do we move next?
steps = list(self._traceback_matrix[row, col])
step = steps.pop()
if (steps != []):
steps_stack.append((row, col, steps, list(alignment_a), list(alignment_b)))
if step == "\\":
alignment_a.insert(0, self.string2[row - 1])
alignment_b.insert(0, self.string1[col - 1])
                score += self.substitution_matrix[self.string2[row - 1]][self.string1[col - 1]]
row -= 1
col -= 1
elif step == "ˆ":
alignment_b.insert(0, "-")
alignment_a.insert(0, self.string2[row - 1])
score += self.gap_penalty
row -= 1
elif step == "<":
alignment_b.insert(0, self.string1[col - 1])
alignment_a.insert(0, "-")
score += self.gap_penalty
col -= 1
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.fill_matrices()
self.__get_alignments()
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
#return max([score[0] for score in self._alignments])
return self.score_matrix[self._rows - 1, self._cols - 1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return int(len(set(self._alignments)))
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return list(set([alignment[1] for alignment in self._alignments]))
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
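# Hedged usage sketch: a minimal Needleman-Wunsch run. The 3-letter identity
# matrix is an assumption made for this demo; real runs would use a matrix
# such as BLOSUM62.
if __name__ == '__main__':
    letters = 'AGT'
    toy_matrix = {a: {b: (1 if a == b else -1) for b in letters} for a in letters}
    ga = GlobalAlignment('AGT', 'AAGT', -2, toy_matrix)
    print(ga.get_best_score())   # 1: three matches and one gap
    print(ga.get_alignments())   # both optimal gap placements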
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
if len(sequences) == 0:
raise TypeError('Empty list of sequences')
self.seq_length = len(sequences[0])
for sequence in sequences:
if len(sequence) != self.seq_length:
raise TypeError('All sequences do not have same length')
# ToDo - Convert both sequence and alphabet to Set and check if cardinality of their
# union is not larger than cardinality of alphabet
for s in sequence:
if s not in ALPHABET:
raise TypeError(f'Invalid alphabet {s} in sequence {sequence}')
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return len(self.sequences), len(self.sequences[0])
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
primary_sequence: str = self.sequences[0]
return primary_sequence.replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
self.amino_acids_per_col = [dict() for x in range(self.seq_length)]
for i in range(self.seq_length):
amino_acids_at_index_i = dict()
for sequence in self.sequences:
amino_acids_at_index_i[sequence[i]] = amino_acids_at_index_i.get(sequence[i], 0) + 1
self.amino_acids_per_col[i] = amino_acids_at_index_i
self.r_i = [len(self.amino_acids_per_col[i].keys()) for i in range(self.seq_length)]
# Usage to get s[i][alphabet]
# i = 2
# alphabet = 'V'
# s_i_alphabet = self.amino_acids_per_col[i].get(alphabet)
sequence_weights_matrix = np.zeros((len(self.sequences), self.seq_length))
sequence_weights = np.zeros(len(self.sequences))
for row_index, sequence in enumerate(self.sequences):
for col_index, alphabet in enumerate(sequence):
sequence_weights_matrix[row_index, col_index] = 1 / (
self.r_i[col_index] * self.amino_acids_per_col[col_index][alphabet])
if self.r_i[col_index] > 1:
sequence_weights[row_index] += sequence_weights_matrix[row_index, col_index]
return sequence_weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
self.get_sequence_weights() # Computes self.r_i
num_obs = np.float64(sum(i for i in self.r_i)) / self.seq_length
return num_obs
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
pssm - position-specific scoring matrix
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param use_sequence_weights: Calculate and apply sequence weights.
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if use_sequence_weights:
sequence_weights = self.get_sequence_weights() # Computes self.r_i as well
        if bg_matrix is not None:  # truthiness of a NumPy array is ambiguous
bg_matrix = np.array([np.array(xi) for xi in bg_matrix]) # Convert to numpy array
else:
bg_matrix = np.zeros((20, 20)) + 1.0 / (20 * 20) # len(ALPHABET) - 1 = 20
# Compute background frequency of each amino acid using row/column sum. Note bg_matrix is symmetric.
amino_acid_background_frequency = bg_matrix.sum(axis=1).reshape(20, 1)
counts = np.zeros((len(ALPHABET), self.seq_length))
for sequence_index, sequence in enumerate(self.sequences):
single_amino_acid_weight = 1
if use_sequence_weights:
single_amino_acid_weight = sequence_weights[sequence_index]
for i, sequence_alphabet in enumerate(sequence):
counts[AA_TO_INT[sequence_alphabet], i] += single_amino_acid_weight
if redistribute_gaps:
# Add weighted background probability. Copy last row(-) to all rows weighted by amino acid frequency.
counts[0:len(ALPHABET) - 1, :] += counts[len(ALPHABET) - 1, :] * amino_acid_background_frequency
# Delete count gaps row, no longer needed
counts = np.delete(counts, (len(ALPHABET) - 1), axis=0)
# Add pseudocounts
if add_pseudocounts:
# Compute pseudocounts
pseudocounts = np.zeros(counts.shape)
for row in range(counts.shape[0]):
for col in range(counts.shape[1]):
if counts[row, col] == 0:
continue
# counts[row, col] is an amino acid with non zero count. Distribute the count to pseudocounts column
pseudocounts[:, col] += (counts[row][col] * bg_matrix[row, :]) / amino_acid_background_frequency[
row, 0]
# Compute independent_observations
N = self.get_number_of_observations()
counts = ((N - 1) * counts + pseudocounts * beta) / (N - 1 + beta)
normalized_counts = counts / np.sum(counts, axis=0)
pssm = 2 * np.log2(normalized_counts / amino_acid_background_frequency)
pssm[np.isinf(pssm)] = -20 # Set to -20 value
# Remove columns where primary sequence has gaps.
        pssm = pssm[:, [i for i, primary_seq_char in enumerate(self.sequences[0]) if primary_seq_char != '-']]
return np.rint(pssm.T).astype(np.int64)
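# Hedged usage sketch: a quick run of the PSSM pipeline above on a made-up
# toy alignment (an assumption, not course data), using the default uniform
# background frequencies and no refinements.
if __name__ == '__main__':
    msa = MSA(['MKLV-', 'MKIVA', 'MRLVA'])
    print(msa.get_number_of_observations())  # 1.6 for this toy MSA
    print(msa.get_pssm())  # shape (4, 20): one row per non-gap primary position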
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
# for each cell, store from which other cell it was reached
self.path_matrix = [[(-1, -1) for _ in range(len(string1) + 1)] for _ in range(len(string2) + 1)]
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
subst = self.substitution_matrix
scores = self.score_matrix
path = self.path_matrix
gap = self.gap_penalty
# fill first row and column with 0
for i in range(len(self.string1) + 1):
scores[0][i] = 0
for i in range(len(self.string2) + 1):
scores[i][0] = 0
# fill other cells, indices are on strings (add 1 for scores)
for s1 in range(len(self.string1)):
for s2 in range(len(self.string2)):
s1_char = self.string1[s1]
s2_char = self.string2[s2]
# compute scores
diag = scores[s2][s1] + subst[s1_char][s2_char]
vert = scores[s2 + 1][s1] + gap
horz = scores[s2][s1 + 1] + gap
# update best score
score = max(diag, vert, horz, 0)
scores[s2 + 1][s1 + 1] = score
# update path: save from which cell this one was reached
# difference to global alignment: only store the best path
                if score > 0:
                    if diag == score:
                        path[s2 + 1][s1 + 1] = (s2, s1)
                    elif vert == score:
                        path[s2 + 1][s1 + 1] = (s2 + 1, s1)
                    elif horz == score:  # elif: keep a single best path, as the comment above intends
                        path[s2 + 1][s1 + 1] = (s2, s1 + 1)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return np.max(self.score_matrix) > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if not self.has_alignment():
return '', ''
# get indices of score maximum
f2, f1 = np.unravel_index(np.argmax(self.score_matrix), self.score_matrix.shape)
al1, al2 = '', ''
while self.score_matrix[f2][f1] > 0:
s1_char = self.string1[f1 - 1]
s2_char = self.string2[f2 - 1]
f2_prev, f1_prev = self.path_matrix[f2][f1]
if f2_prev + 1 == f2 and f1_prev + 1 == f1:
# coming from diagonal -> append chars to both strings: (X, Y) -> (Xa, Yb)
al1 = s1_char + al1
al2 = s2_char + al2
elif f2_prev == f2 and f1_prev + 1 == f1:
# coming from horizontal -> append char only to string 1: (X, Y) -> (Xa, Y-)
al1 = s1_char + al1
al2 = '-' + al2
else:
# coming from vertical -> append char only to string 2: (X, Y) -> (X-, Yb)
al1 = '-' + al1
al2 = s2_char + al2
f1 = f1_prev
f2 = f2_prev
return al1, al2
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
if not self.has_alignment():
return False
# starting indices
f2_max, f1_max = np.unravel_index(np.argmax(self.score_matrix), self.score_matrix.shape)
f2_min, f1_min = f2_max, f1_max
# follow the path until zero-score is found
while True:
f2_next, f1_next = self.path_matrix[f2_min][f1_min]
if self.score_matrix[f2_next][f1_next] == 0:
break # next field would be zero -> stop here
else:
f2_min, f1_min = f2_next, f1_next
residue_index += 1 # convert to score matrix indices (1 larger than string index)
        if string_number == 1:
            return f1_min <= residue_index <= f1_max  # aligned if within string1's aligned span
        else:
            return f2_min <= residue_index <= f2_max  # aligned if within string2's aligned span
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db_seq_idx = 0
self.db_seq_list = []
self.db_word_hash = dict()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
words = set([sequence[i: i + 3] for i in range(len(sequence) - 2)])
for word in words:
if word not in self.db_word_hash:
self.db_word_hash[word] = set()
self.db_word_hash[word].add(self.db_seq_idx)
self.db_seq_idx += 1
self.db_seq_list.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [self.db_seq_list[i] for i in self.db_word_hash.get(word, [])]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_seqs = self.db_seq_idx
num_words = len(self.db_word_hash.keys())
seq_word_sum = sum([len(v) for v in self.db_word_hash.values()])
words_per_seq = round(seq_word_sum / num_seqs)
seqs_per_word = round(seq_word_sum / num_words)
return (num_seqs, num_words, words_per_seq, seqs_per_word)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_mat = substitution_matrix
def make_pssm(self, sequence):
seq_to_int = [AA_TO_INT[aa] for aa in sequence]
return self.sub_mat[seq_to_int]
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
if sequence is None and pssm is None:
return None
elif pssm is None:
pssm = self.make_pssm(sequence)
return list(self.generate_words(pssm, T=T).keys())
def generate_words(self, pssm, *, T=11):
idx = range(20)
words = dict()
for a in idx:
for b in idx:
for c in idx:
m = pssm[:, [a, b, c]]
m[:, 1] = np.roll(m[:, 1], -1)
m[:, 2] = np.roll(m[:, 2], -2)
m_sums = np.einsum('ij->i', m)
m_sums = m_sums[:-2]
m_sums = np.nonzero(m_sums >= T)[0]
if m_sums.size > 0:
word = INT_TO_AA[a] + INT_TO_AA[b] + INT_TO_AA[c]
words[word] = tuple(m_sums)
return words
def get_targets(self, words, blast_db):
target_list = dict()
for word in words:
targets = blast_db.get_sequences(word)
for target in targets:
if target not in target_list:
target_list[target] = set()
target_list[target].add(word)
return target_list
def get_word_positions(self, sequence, word):
word_positions = tuple()
search_position = 0
while True:
search_position = sequence.find(word, search_position)
if search_position >= 0:
word_positions = word_positions + (search_position,)
else:
break
search_position += 1
return word_positions
def extend_left(self, pssm, target, q_off, t_off, X=5):
score = 0
max_score = 0
n = min(q_off, t_off)
best_i = 0
for i in range(1, n + 1):
score += pssm[q_off - i][AA_TO_INT[target[t_off - i]]]
if score > max_score:
best_i = i
max_score = score
elif (max_score - score) >= X:
break
return max_score, best_i
def extend_right(self, pssm, target, q_off, t_off, *, X=5):
score = 0
max_score = 0
n = min(pssm.shape[0] - q_off, len(target) - t_off)
for i in range(3):
score += pssm[q_off + i][AA_TO_INT[target[t_off + i]]]
best_i = 2
max_score = score
for i in range(3, n):
score += pssm[q_off + i][AA_TO_INT[target[t_off + i]]]
if score > max_score:
best_i = i
max_score = score
elif (max_score - score) >= X:
break
return max_score, best_i
def get_hsp_one_hit(self, pssm, target, q_off, t_off, *, X=5, S=20):
left_score, left_i = self.extend_left(pssm,
target,
q_off,
t_off,
X=X)
right_score, right_i = self.extend_right(pssm,
target,
q_off,
t_off,
X=X)
total_score = left_score + right_score
if total_score >= S:
q_start = q_off - left_i
t_start = t_off - left_i
hsp_length = left_i + right_i + 1
return q_start, t_start, hsp_length, total_score
return None
def get_hsp_two_hit(self, pssm, target, diag_dict, *, X=5, S=20, A=40):
hsp_list = []
for d_off, hits in diag_dict.items():
last_ext = -1
last_hit = None
if len(hits) >= 2:
hits.sort()
for hit in hits:
if hit <= last_ext:
continue
if last_hit is None:
last_hit = hit
continue
dist = hit - last_hit
if dist < 3:
continue
elif dist > A:
last_hit = hit
continue
q_off = hit
t_off = hit - d_off
left_score, left_i = self.extend_left(pssm,
target,
q_off,
t_off,
X=X)
if hit - left_i > last_hit + 2:
last_hit = hit
continue
right_score, right_i = self.extend_right(pssm,
target,
q_off,
t_off,
X=X)
last_hit = None
last_ext = hit + right_i
total_score = int(left_score + right_score)
if total_score >= S:
q_start = int(q_off - left_i)
t_start = int(t_off - left_i)
hsp_length = int(left_i + right_i + 1)
hsp_list.append((q_start,
t_start,
hsp_length,
total_score))
return hsp_list
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
if query is None and pssm is None:
return None
elif pssm is None:
pssm = self.make_pssm(query)
words = self.generate_words(pssm, T=T)
targets = self.get_targets(words, blast_db)
hsp_dict = dict()
for target, target_words in targets.items():
hsp_list = set()
for word in target_words:
query_offsets = words[word]
target_offsets = self.get_word_positions(target, word)
for q_off in query_offsets:
for t_off in target_offsets:
hsp = self.get_hsp_one_hit(pssm,
target,
q_off,
t_off,
X=X,
S=S)
if hsp is not None:
hsp_list.add(hsp)
if hsp_list:
hsp_dict[target] = list(hsp_list)
return hsp_dict
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
if query is None and pssm is None:
return None
elif pssm is None:
pssm = self.make_pssm(query)
words = self.generate_words(pssm, T=T)
targets = self.get_targets(words, blast_db)
hsp_dict = dict()
for target, target_words in targets.items():
diag_dict = dict()
for word in target_words:
query_offsets = words[word]
target_offsets = self.get_word_positions(target, word)
for q_off in query_offsets:
for t_off in target_offsets:
d_off = q_off - t_off
if d_off not in diag_dict:
diag_dict[d_off] = []
diag_dict[d_off].append(q_off)
hsp_list = self.get_hsp_two_hit(pssm,
target,
diag_dict,
X=X,
S=S,
A=A)
if hsp_list:
hsp_dict[target] = hsp_list
return hsp_dict
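

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the class and database names,
    # the substitution matrix and the query sequence below are assumptions,
    # not part of the exercise data, so the calls are left commented out.
    # blast_db = BlastDb()
    # blast_db.add_sequence('MGPRARPAFLLLMLLQTAVL')
    # blast = Blast(substitution_matrix)  # e.g. a 20x20 BLOSUM62 array
    # print(blast.search_one_hit(blast_db, query='MGPRARPAFL', T=13, X=5, S=30))
    # print(blast.search_two_hit(blast_db, query='MGPRARPAFL', T=11, A=40))
    pass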
<file_sep>#!/bin/bash
links_list=""
while IFS= read -r line
do
echo "$line"
links_list="$links_list $line"
echo "$links_list"
done < "moss_links.txt"<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
import re
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        total_length = 0  # accumulates the total, then averaged below
        for s in self.__sequences:
            total_length += len(s[1])
        return total_length / self.get_counts()
def read_fasta(self, path):
sequences = []
with open(path, 'r') as f:
header = ''
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
header = line.strip()
if sequence_started:
seq = re.sub("[^ARNDBCEQZGHILKMFPSTWYV]", "", seq)
sequence = (header, seq)
sequences.append(sequence)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
seq = re.sub("[^ARNDBCEQZGHILKMFPSTWYV]", "", seq)
sequence = (header, seq)
sequences.append(sequence)
self.__sequences = sequences
return sequences
def get_abs_frequencies(self):
total_sequence = ''
for s in self.__sequences:
total_sequence += s[1]
counted = Counter(total_sequence)
result = {}
for key in counted:
if key in "ARNDBCEQZGHILKMFPSTWYV":
result[key] = counted[key]
return result
def get_av_frequencies(self):
counted = self.get_abs_frequencies()
sequences_length = self.get_counts() * self.get_average_length()
for key in counted:
counted[key] /= sequences_length
return counted
if __name__ == '__main__':
obj = AADist("./tests/tests.fasta")
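    # Further illustrative calls (the FASTA path above comes from the
    # original file and may not exist in every checkout):
    # print(obj.get_counts())
    # print(obj.get_abs_frequencies())
    # print(obj.get_av_frequencies())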
#print(obj.get_average_length())<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
        # Traceback directions are encoded as prime factors: 3 = diagonal,
        # 5 = up (gap in string1), 7 = left (gap in string2); products mark ties.
        self.traceback_matrix = np.ones((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignments = []
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix[0][0] = 0
for i in range(len(self.string1)):
self.score_matrix[0][i+1] = self.score_matrix[0][i] + self.gap_penalty
self.traceback_matrix[0][i+1] *= 7
for i in range(len(self.string2)):
self.score_matrix[i+1][0] = self.score_matrix[i][0] + self.gap_penalty
self.traceback_matrix[i+1][0] *= 5
for i in range(len(self.string2)): # indices running over strings, add +1 for score and traceback matrices
for j in range(len(self.string1)):
match_score = self.substitution_matrix[self.string2[i]][self.string1[j]]
score_diag = self.score_matrix[i][j] + match_score
score_hgap = self.score_matrix[i][j+1] + self.gap_penalty
score_vgap = self.score_matrix[i+1][j] + self.gap_penalty
best = max(score_diag, score_hgap, score_vgap)
self.score_matrix[i+1][j+1] = best
if score_diag == best:
self.traceback_matrix[i+1][j+1] *= 3
if score_hgap == best:
self.traceback_matrix[i+1][j+1] *= 5
if score_vgap == best:
self.traceback_matrix[i+1][j+1] *= 7
self.alignments = self.traceback(len(self.string2), len(self.string1))
def traceback(self, i, j):
if i == 0 and j == 0:
return [('','')]
alignments = []
if self.traceback_matrix[i][j] % 3 == 0:
for alignment in self.traceback(i-1, j-1):
alignments.append((alignment[0] + self.string1[j-1], alignment[1] + self.string2[i-1]))
if self.traceback_matrix[i][j] % 5 == 0:
for alignment in self.traceback(i-1, j):
alignments.append((alignment[0] + '-', alignment[1] + self.string2[i-1]))
if self.traceback_matrix[i][j] % 7 == 0:
for alignment in self.traceback(i, j-1):
alignments.append((alignment[0] + self.string1[j-1], alignment[1] + '-'))
return alignments
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
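

if __name__ == '__main__':
    # Usage sketch with a tiny hypothetical substitution matrix over a
    # two-letter alphabet; scores, strings and gap penalty are invented
    # purely for illustration.
    toy_matrix = {
        'A': {'A': 1, 'G': -1},
        'G': {'A': -1, 'G': 1},
    }
    ga = GlobalAlignment('AGA', 'AA', -2, toy_matrix)
    print(ga.get_best_score())
    print(ga.get_alignments())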
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if sequences is None:
raise TypeError
if len(sequences) < 1:
raise TypeError
for sequence in sequences:
for aa in sequence:
if aa not in ALPHABET:
raise TypeError
length = len(sequences[0])
for sequence in sequences[1:]:
if length != len(sequence):
raise TypeError
self.number_of_sequences = len(sequences)
self.msa_length = len(sequences[0])
self.sequences = sequences
# Do weights
w = np.zeros((len(sequences),len(sequences[0])))
for row in range(len(sequences)):
for col in range(len(sequences[0])):
w[row][col] = 1 / (self.get_r(col) * self.get_s(row, col))
for col in range(len(sequences[0])):
r = self.get_r(col)
if r == 1:
for row in range(len(sequences)):
w[row][col] = 0
self.weights = np.sum(w, axis=1)
# Number of independent observations
# N = (1/L) * sum[i = 1 to L] (r_i)
# N is estimated number of independent observations
# L is the number of MSA columns
        # r_i is the number of different observed amino acids in MSA column i
# (count gaps as a 21st amino acid)
L = len(self.sequences[0])
r_sum = 0
for i in range(L):
r_sum += self.get_r(i)
self.N = r_sum / L
def get_s(self, row, col):
count = 0
for s in self.sequences:
if s[col] == self.sequences[row][col]:
count += 1
return count
def get_r(self, col):
r = []
for s in self.sequences:
r.append(s[col])
return len(set(r))
def test():
pass
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
#pssm = np.zeros(self.get_size())
if redistribute_gaps:
aa_len = 21
else:
aa_len = 20
bg_freqs = np.zeros((20,))
if (bg_matrix is not None):
bg_freqs = np.sum(bg_matrix, axis=0)
else:
bg_freqs[bg_freqs == 0] = 0.05
# Create Matrix
num_sequences, msa_length = self.get_size()
m = np.zeros((msa_length, aa_len))
# Calculate sequence weights
weights = self.get_sequence_weights()
# Calculate relative frequencies
f = np.zeros_like(m)
num_rows, num_cols = f.shape
for row in range(num_rows):
for col in range(num_cols):
for s_row in range(num_sequences):
if self.sequences[s_row][row] == ALPHABET[col]:
f[row][col] += weights[s_row]
# Count (with weights) observed amino acids and gaps
num_rows, num_cols = m.shape
for row in range(num_rows):
for col in range(num_cols):
for s_row in range(num_sequences):
if self.sequences[s_row][row] == ALPHABET[col]:
m[row][col] += 1#weights[s_row]
if use_sequence_weights:
m = f
else:
f = np.zeros_like(f)
f[f == 0] = 0.0025
f = m
# Redistribute gaps
if redistribute_gaps:
for row in range(num_rows):
for col in range(num_cols-1):
m[row][col] += m[row][-1] * bg_freqs[col]
m = np.delete(m, -1, 1)
bg_tmp = np.zeros((20,20))
if bg_matrix is None:
bg_tmp[bg_tmp == 0] = 0.0025
else:
bg_tmp = bg_matrix
# Add weighted pseudocounts
g = np.zeros((msa_length, 20))
for a in range(20):
for i in range(msa_length):
for j in range(20):
f_ij = 0
for k in range(num_sequences):
if self.sequences[k][i] == ALPHABET[j]:
f_ij += 1
f_ij /= num_sequences
g[i][a] += (f[i][j]/bg_freqs[j]) * bg_tmp[j][a]
# Independent observations
N = self.get_number_of_observations()
        s = m
# Adjusted frequencies
if add_pseudocounts:
F = np.zeros_like(g)
alpha = N - 1
for i in range(msa_length):
for j in range(len(ALPHABET)-1):
#if (len(f[i]) > 21):
# f_i = np.delete(f[i], -1)
#else:
#f_i = f[i]
F[i][j] = (alpha * f[i][j] + beta*g[i][j]) / (alpha + beta)
s = F
#num_rows, num_cols = s.shape
#for i in range(num_rows):
# for j in range(num_cols):
# s[i][j] = 2 * np.log2(f[i][j] / bg_freqs[j])
# print (np.round(s))
# print (m)
#s = m
s /= np.sum(s, axis=1)[:, None]
#s /= 0.05
for i in range(len(s)):
for j in range(len(s[0])):
s[i][j] /= bg_freqs[j]
s = 2*np.log2(s)
        # (Exploratory normalization / pseudocount variants removed; the
        # active computation above already normalizes the rows, divides by
        # the background frequencies and applies the 2*log2 score.)
# Remove rows corresponding to gaps in the primary sequence
pssm = s
nongap_indices = []
primary_sequence = self.sequences[0]
mask = np.ones(len(pssm), dtype=bool)
index = 0
for aa in primary_sequence:
if aa == "-":
mask[index] = False
index += 1
        pssm = pssm[mask]
        # Replace -inf values (log of zero) with -20 as required
pssm[pssm == -np.inf] = -20
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.number_of_sequences, self.msa_length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-","")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = self.N
return np.float64(num_obs)
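

if __name__ == '__main__':
    # Usage sketch with a toy alignment (sequences invented for
    # illustration); only the methods that do not depend on the unfinished
    # PSSM code above are called.
    msa = MSA(['SE-AN', 'SE-ES', 'SEVEN', 'SE-AS'])
    print(msa.get_size())
    print(msa.get_primary_sequence())
    print(msa.get_sequence_weights())
    print(msa.get_number_of_observations())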
<file_sep>import numpy as np
from numpy import inf
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
ALLOWED_CHARS = set(ALPHABET)
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
        if len(sequences) == 0:
            raise TypeError()
        seq_length = len(sequences[0])
        for seq in sequences:
            if len(seq) != seq_length:
                raise TypeError()
            if not set(seq).issubset(ALLOWED_CHARS):
                raise TypeError()
self.sequences = sequences
self.number_of_sequences = len(self.sequences)
self.msa_length = seq_length
self.primary_sequence = self.sequences[0].replace(ALPHABET[GAP_INDEX], '')
self.calculate_sequence_weights()
def calculate_sequence_weights(self):
        sequences_2d = np.array([list(seq) for seq in self.sequences])
        weights = np.zeros(np.shape(sequences_2d)[0], dtype=float)
        number_of_observed_aas = np.zeros(self.msa_length, dtype=float)
        for column_index in range(self.msa_length):
            observed_aas = np.unique(sequences_2d[:, column_index])
            number_of_observed_aas[column_index] = len(observed_aas)
            # r > 1
            if len(observed_aas) > 1:
                for aa in observed_aas:
                    observation_indices = np.where(sequences_2d[:, column_index] == aa)
                    for index in observation_indices:
                        current_weight = 1 / (len(observation_indices[0]) * len(observed_aas))
                        weights[index] += current_weight
self.weights = weights
self.observed_aas = np.sum(number_of_observed_aas) / self.msa_length
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((self.msa_length, 20), dtype=float)
# 1) Calculate sequence weights
weights = np.ones((len(self.sequences)), dtype=float)
if use_sequence_weights:
weights = self.weights
# 2) Count (with weights) observed amino acids and gaps
# for every aa in ALPHABET for every seq in the MSA count number of occurences
for seqIndex, seq in enumerate(self.sequences):
for row in range(self.msa_length):
for column in range(20):
if seq[row] == ALPHABET[column]:
pssm[row, column] += weights[seqIndex]
observed_aas = self.observed_aas
# 3) Redistribute gaps according to background frequencies
background_frequencies = np.full((20), 0.05, dtype=float)
substitution_matrix = np.full((20,20), 0.05, dtype=float)
        if bg_matrix is not None:
substitution_matrix = bg_matrix
for column in range(20):
background_frequencies[column] = np.sum(bg_matrix[column])
if redistribute_gaps:
for seqIndex, seq in enumerate(self.sequences):
for row in range(self.msa_length):
gap_count = 0
if seq[row] == '-':
gap_count += weights[seqIndex]
for column in range(20):
pssm[row, column] += gap_count * background_frequencies[column]
# 4) Add weighted pseudocounts
if add_pseudocounts:
pseudocounts = np.zeros((self.msa_length, 20), dtype=float)
for row in range(self.msa_length):
for column in range(20):
# if pssm[row, column] == 0:
# continue
for j in range(20):
pseudocounts[row, column] += pssm[row, column] / background_frequencies[column] * substitution_matrix[column][j]
pssm += pseudocounts
# 5) Normalize to relative frequencies
for row in range(pssm.shape[0]):
pssm[row,:] = pssm[row,:] / np.sum(pssm[row])
# 6) Divide by background frequencies
        if bg_matrix is None:
pssm = pssm / 0.05
else:
for column in range(20):
pssm[:, column] = pssm[:, column] / background_frequencies[column]
# 7) Calculate Log-Score
pssm = 2 * np.log2(pssm)
pssm[pssm == -inf] = -20
pssm[pssm == -2e63] = -20
# 8) Remove rows corresponding to gaps in the primary sequence (primary sequence = first in MSA)
gap_indices_in_primary_sequence = [pos for pos, char in enumerate(self.sequences[0]) if char == '-']
pssm = np.delete(pssm, gap_indices_in_primary_sequence, axis=0)
        pssm = np.rint(pssm).astype(np.int64)
        return pssm
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.number_of_sequences, self.msa_length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.primary_sequence
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.observed_aas
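

if __name__ == '__main__':
    # Sketch of the PSSM call on a toy alignment (sequences invented).
    # bg_matrix is left as None, so uniform background frequencies are used;
    # columns without observations may trigger divide-by-zero warnings.
    msa = MSA(['SE-AN', 'SE-ES', 'SEVEN', 'SE-AS'])
    print(msa.get_pssm(use_sequence_weights=True))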
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.pointer_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype='i,i')
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
        for x in range(1, len(self.string2)+1):
            # loop through string2 (rows)
            for y in range(1, len(self.string1)+1):
                # loop through string1 (columns)
                diagonal = self.score_matrix[x-1,y-1] + self.substitution_matrix[self.string2[x-1]][self.string1[y-1]]
                horizontal = self.score_matrix[x,y-1] + self.gap_penalty
                vertical = self.score_matrix[x-1,y] + self.gap_penalty
                best = max(diagonal, horizontal, vertical, 0)
                self.score_matrix[x,y] = best
                # Record a traceback pointer (ties prefer the diagonal move);
                # cells scoring 0 keep the default (0, 0) pointer. The original
                # strict ">" comparisons left ties without any pointer.
                if best > 0:
                    if best == diagonal:
                        self.pointer_matrix[x,y] = (x-1,y-1)
                    elif best == horizontal:
                        self.pointer_matrix[x,y] = (x,y-1)
                    else:
                        self.pointer_matrix[x,y] = (x-1,y)
        return self.score_matrix
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
a = np.amax(self.score_matrix)
if a != 0:
return True
else:
return False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
xCord = 0
yCord = 0
        best_score = 0  # renamed from `max` to avoid shadowing the built-in
        for x in range(1, len(self.string2)+1):
            for y in range(1, len(self.string1)+1):
                if self.score_matrix[x,y] >= best_score:
                    best_score = self.score_matrix[x,y]
                    xCord = x
                    yCord = y
xStart = xCord #start coordinate of the alignment
word = []
while True:
if xCord == 0 or yCord == 0:
break
if self.score_matrix[xCord,yCord] == 0:
break
            pointer = self.pointer_matrix[xCord,yCord]  # renamed from `tuple` (built-in)
            if pointer[0] < xCord and pointer[1] < yCord: #diagonal
                word.append(self.string2[xCord-1])
                xCord = pointer[0]
                yCord = pointer[1]
            elif pointer[0] < xCord and pointer[1] == yCord: #vertical
                word.append('-')
                xCord = pointer[0]
                yCord = pointer[1]
            elif pointer[0] == xCord and pointer[1] < yCord: #horizontal (pointer points left; was "> yCord", which never matched)
                word.append('-')
                xCord = pointer[0]
                yCord = pointer[1]
            else:
                break
word = ''.join(word[::-1])
return (word, self.string2[(xStart-len(word)):xStart])
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
for x in range(0, len(self.string2)):
if self.pointer_matrix[x,residue_index][0] != 0 and self.pointer_matrix[x,residue_index][1] != 0:
return True
else:
for y in range(0, len(self.string1)):
if self.pointer_matrix[residue_index,y][0] != 0 and self.pointer_matrix[residue_index,y][1] != 0:
return True
return False
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        lengths = [len(seq) for seq in self.__sequences]
        return sum(lengths) / len(lengths)
def add_sequence(self, seq):
self.__sequences.append(seq)
def read_fasta(self, filepath):
with open(filepath, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.add_sequence(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
seq = seq.replace('*', '')
self.add_sequence(seq)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
letters = ''.join(self.__sequences)
return Counter(letters)
def aa_dist(self, aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def get_av_frequencies(self):
        # return number of occurrences, normalized by length
letters = ''.join(self.__sequences)
return self.aa_dist(letters)
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug, we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
# get id of the CIF
cif_dict = MMCIF2Dict(path)
identifier = cif_dict['_entry.id']
self.structure = PDB_Parser.CIF_PARSER.get_structure(identifier, path) # Parse the structure once and re-use it in the functions below
def _get_chain(self, chain_id):
chains = list(filter(lambda chain: chain.id == chain_id, self.structure.get_chains()))
if len(chains) == 0:
return 0
chain = chains[0]
return chain
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
return len(list(self.structure.get_chains()))
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
code = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
chains = list(filter(lambda chain: chain.id == chain_id, self.structure.get_chains()))
if len(chains) == 0:
return ''
chain = chains[0]
return ''.join([code[residue.get_resname()] for residue in chain.get_residues() if residue.get_resname() in code])
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
chain = self._get_chain(chain_id)
return len([res for res in chain.get_residues() if res.get_resname() == 'HOH' ])
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
try:
res1 = self._get_chain(chain_id_1).get_list()[index_1]
res2 = self._get_chain(chain_id_2).get_list()[index_2]
return int( res1['CA'] - res2['CA'] )
        except (IndexError, KeyError):  # residue missing or no C-alpha atom resolved
            return None
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
length = len(self.get_sequence(chain_id))
contact_map = [[self.get_ca_distance(chain_id, j, chain_id, i) for j in range(length)] for i in range(length)]
        return np.array(contact_map).astype(int)  # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
chain = self._get_chain(chain_id)
residues = [ res for res in chain.get_residues() if res.get_resname() != 'HOH' ]
b_factors = [np.nanmean(
list(
map(lambda atom: atom.get_bfactor(), res.get_list())
)) for res in residues]
# print('mean b factors', b_factors)
mean_b_factors = np.nanmean(b_factors)
var_b_factors = np.nanvar(b_factors)
normalized_b_factors = [(bf - mean_b_factors) / np.sqrt(var_b_factors) for bf in b_factors]
        return np.array(normalized_b_factors).astype(int)
def main():
print('PDB parser class.')
# parser = PDB_Parser('./tests/7ahl.cif')
# parser.get_bfactors('A')
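    # Further illustrative calls against the same assumed test file:
    # print(parser.get_number_of_chains())
    # print(parser.get_sequence('A'))
    # print(parser.get_ca_distance('A', 0, 'A', 10))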
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.6
##############
from pathlib import Path
from collections import Counter
class AADist:
'''
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
'''
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def __add_sequence(self, seq):
# Remove trailing asterisk, if present
if seq.endswith('*'):
seq = seq[:-1]
if len(seq) > 0:
self.__sequences.append(seq)
def read_fasta(self, filepath):
with Path(filepath).open('r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.__add_sequence(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__add_sequence(seq)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
total_length = 0
for seq in self.__sequences:
total_length += len(seq)
return total_length / self.get_counts()
def get_abs_frequencies(self):
# not normalized by length
count = Counter()
for seq in self.__sequences:
count.update(seq)
return count
def get_av_frequencies(self):
# normalize by length
count = self.get_abs_frequencies()
total_length = 0
for seq in self.__sequences:
total_length += len(seq)
for key in count:
count[key] /= total_length
return count
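

if __name__ == '__main__':
    # Usage sketch; the FASTA path below is an assumption for illustration,
    # so the calls are left commented out.
    # dist = AADist('tests.fasta')
    # print(dist.get_counts(), dist.get_average_length())
    # print(dist.get_av_frequencies())
    pass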
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
res = []
for seq in self.sequences:
if word in seq:
res.append(seq)
return res
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        # Placeholder values; the real statistics are not implemented here.
        # Note: tuple(1, 2, 3, 4) raises a TypeError, a literal is needed.
        return (1, 2, 3, 4)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.matrix = substitution_matrix
    def get_words(self, positions=None, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        if positions is None:  # avoid a shared mutable default argument
            positions = dict()
        res = []
for w1 in ALPHABET:
for w2 in ALPHABET:
for w3 in ALPHABET:
if sequence is not None:
ok, pos = self.check_word(w1+w2+w3, sequence, T)
if ok:
positions[w1+w2+w3] = pos
res.append(w1+w2+w3)
else:
ok, pos = self.check_word_pssm(w1+w2+w3, pssm, T)
if ok:
positions[w1+w2+w3] = pos
res.append(w1+w2+w3)
return res
def check_word_pssm(self, word, pssm, T):
for index in range(0, len(pssm) - 2):
val = self.get_word_score_pssm(index, word, pssm)
if val >= T:
return True, index
return False, -1
def get_word_score_pssm(self, index, word, pssm):
val = 0
for i in range(0, 3):
val += pssm[index + i][AA_TO_INT[word[i]]]
return val
def check_word(self, word, sequence, T):
for index in range(0, len(sequence) - 2):
val = self.get_word_score(index, word, sequence)
if val >= T:
return True, index
return False, -1
def get_word_score(self, index, word, sequence):
val = 0
for a, b in zip(word, sequence[index:index + len(word)]):
val += self.matrix[AA_TO_INT[b]][AA_TO_INT[a]]
return val
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
word_positions = dict()
words = self.get_words(sequence=query, positions=word_positions, pssm=pssm, T=T)
for word in words:
targets = blast_db.get_sequences(word)
for target in targets:
res = self.check_word_target(query, word, word_positions[word], target, S, X)
if res is not None:
if d.get(target, None) is None:
d[target] = [res]
else:
d[target].append(res)
for key, values in d.items():
d[key] = list(set(values))
return d
def check_word_target(self, query, word, word_position, target, S, X):
query_start = word_position
query_end = word_position + 3
target_start = self.find_word(word, target)
target_end = target_start + 3
max_query_start = query_start
max_query_end = query_end
max_target_start = target_start
max_target_end = target_end
max_score = self.get_word_score(target_start, word, target)
# go right
while query_end < len(query) and target_end < len(target):
query_end += 1
target_end += 1
new_score = self.get_word_score(query_start, target[target_start:target_end], query)
if new_score > max_score:
max_query_start = query_start
max_query_end = query_end
max_target_start = target_start
max_target_end = target_end
max_score = new_score
if new_score < max_score - X:
break
query_end = max_query_end
target_end = max_target_end
while query_start >= 0 and target_start >= 0:
query_start -= 1
target_start -= 1
new_score = self.get_word_score(query_start, target[target_start:target_end], query)
if new_score > max_score:
max_query_start = query_start
max_query_end = query_end
max_target_start = target_start
max_target_end = target_end
max_score = new_score
if new_score < max_score - X:
break
query_start = max_query_start
target_start = max_target_start
if max_score >= S:
return (query_start, target_start, (target_end - target_start), max_score)
else:
return None
def find_word(self, word, seq):
for i in range(0, len(seq)-2):
if word[0] == seq[i]:
if word[1] == seq[i+1]:
if word[2] == seq[i+2]:
return i
return -1
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder result, two-hit search not implemented
        return d
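

if __name__ == '__main__':
    # Rough usage sketch; the database contents, substitution matrix and
    # query are placeholders for illustration only, hence commented out.
    # db = BlastDb()
    # db.add_sequence('MGPRARPAFLLLMLLQTAVL')
    # blast = Blast(substitution_matrix)  # e.g. a 20x20 BLOSUM62 array
    # print(blast.search_one_hit(db, query='MGPRARPAFL', T=13, X=5, S=30))
    pass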
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
# Write a program calledorffinder.pywhich does the following:
# (a) write a function get_orfs that takes a DNA sequence as a single string parameter
# (b) if the input sequence is not a DNA sequence it raises a TypeError exception
# (c) find all ORFs encoding proteins longer than 33,
# i.e. the resulting polypeptide has at least 34 amino acids,
# in all six reading frames (primary and reverse-complementary strand)
# (d) the return value is a list of ORFs represented as 4-tuples containing (in this order)
# the position of the first DNA residue, the position of the last DNA residue
# (including stop codon), the translated amino acid sequence as a single string,
# and a flag which is True if the ORF is parsed from the reversed strand.
# Positions start at index 0 (first nucleotide in the primary strand)
# and ORFs in the reverse-complementary strand must be indexed according
# to their position on the primary strand (i.e. start > stop except if they are circular)
# Example:(591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
# (e) if multiple ORFs overlap and end with the same stop codon return only the longest
# (f) mind circular DNA, e.g. plasmids
# (g) the DNA index always has to be non-negative and less than the genome length.
import re
import os
import itertools
# Genetic code dictionary
codon_dict = {
# Phenylalanine / F
'TTT':'F',
'TTC':'F',
# Leucine / L
'TTA':'L',
'TTG':'L',
'CTT':'L',
'CTC':'L',
'CTA':'L',
'CTG':'L',
# Isoleucine / I
'ATT':'I',
'ATC':'I',
'ATA':'I',
# Methionine / START
'ATG':'M',
# Valine / V
'GTT':'V',
'GTC':'V',
'GTA':'V',
'GTG':'V',
# Serine / S
'TCT':'S',
'TCC':'S',
'TCA':'S',
'TCG':'S',
# Proline / P
'CCT':'P',
'CCC':'P',
'CCA':'P',
'CCG':'P',
# Threonine / T
'ACT':'T',
'ACC':'T',
'ACA':'T',
'ACG':'T',
# Alanine / A
'GCT':'A',
'GCC':'A',
'GCA':'A',
'GCG':'A',
# Tyrosine / Y
'TAT':'Y',
'TAC':'Y',
# Histidine / H
'CAT':'H',
'CAC':'H',
# Glutamine / Q
'CAA':'Q',
'CAG':'Q',
# Asparagine / N
'AAT':'N',
'AAC':'N',
# Lysine / K
'AAA':'K',
'AAG':'K',
# Aspartic acid / D
'GAT':'D',
'GAC':'D',
# Glutamic acid / E
'GAA':'E',
'GAG':'E',
# Cysteine / C
'TGT':'C',
'TGC':'C',
# Tryptophan / W
'TGG':'W',
# Arginine / R
'CGT':'R',
'CGC':'R',
'CGA':'R',
'CGG':'R',
'AGA':'R',
'AGG':'R',
# Serine / S
'AGT':'S',
'AGC':'S',
# Glycine / G
'GGT':'G',
'GGC':'G',
'GGA':'G',
'GGG':'G',
# STOP
'TAA':'STOP\n',
'TAG':'STOP\n',
'TGA':'STOP\n'
}
orf_dictionary = {}
genome_length = 0
orf_circular = {}
def get_orfs(genome):
return get_orfs_of_size(genome, 34)
def get_orfs_of_size(genome, min_sequence_length):
if not is_genome_valid(genome):
raise TypeError
global genome_length
genome_length = len(genome)
print("Processing genome with length of:", genome_length)
orfs = []
get_orfs_in_frame(genome, min_sequence_length, 0, False)
get_orfs_in_frame(genome, min_sequence_length, 1, False)
get_orfs_in_frame(genome, min_sequence_length, 2, False)
for end, start in orf_dictionary.items():
# not circular
if end > start:
orfs.append((
start,
end,
codons_to_aa(genome[start:end-2]).replace('\n', '').replace('\r', ''),
False
))
# circular
else:
orfs.append((
start,
end,
codons_to_aa(genome[start:] + genome[0:end-2]).replace('\n', '').replace('\r', ''),
False
))
reverse_genome = get_complementary_genome(genome)[::-1]
get_orfs_in_frame(reverse_genome, min_sequence_length, 0, True)
get_orfs_in_frame(reverse_genome, min_sequence_length, 1, True)
get_orfs_in_frame(reverse_genome, min_sequence_length, 2, True)
last = len(orfs)-1
current = 0
for end, start in orf_dictionary.items():
if current <= last:
current += 1
continue
# not circular
if end < start:
orfs.append((
start,
end,
codons_to_aa(reverse_genome[reverse_index(start) : reverse_index(end-1+3)]).replace('\n', '').replace('\r', ''),
True
))
# circular
else:
orfs.append((
start,
end,
codons_to_aa(reverse_genome[reverse_index(start):] + reverse_genome[0:reverse_index(end-1+3)] ).replace('\n', '').replace('\r', ''),
True
))
#print(codons_to_aa(genome[start:end+1]))
for orf in orfs:
print(orf, get_sequence_length(orf[0], orf[1], orf[3], True))
#print(len(orfs))
#print(*orfs, sep="\n")
return orfs
def is_genome_valid(genome):
return bool(re.match('^[TCGA]+$', genome))
def get_complementary_genome(genome):
result = ''
for n in genome:
result += get_complementary_nucleobase(n)
return result
def get_complementary_nucleobase(n):
if n == 'A':
return 'T'
elif n == 'T':
return 'A'
elif n == 'C':
return 'G'
elif n == 'G':
return 'C'
def get_sequence_length(start, end, is_reverse, is_circular):
# + 1 in the brackets := so the first codon is counted as well and this makes it a multiple of 3
# / 3 = cause three codons are one amino acid
# - 1 = the stop codons are not translated into an amino acid
# % length + 3 := when the genome is circular the start has a higher index => end - start is negative
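    # Worked example (assumed 21-nt linear genome): an ORF spanning indices
    # 0..20 covers 21 bases = 7 codons, and (21 / 3) - 1 = 6 amino acids
    # once the stop codon is discounted.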
result = 0
actual_start = start
actual_end = end
if is_reverse:
actual_start = reverse_index(start)
actual_end = reverse_index(end)
if not is_circular:
result = ( (abs(actual_end - actual_start) + 1) ) / 3 - 1
else:
if not is_reverse:
result = (((actual_end - actual_start + 1) % genome_length) / 3 - 1) % int(genome_length / 3)
else:
result = (genome_length - (actual_end-actual_start) + 1) / 3 - 1
print("Length from", actual_start, "to", actual_end, "is:", result, is_reverse, is_circular)
return result
def reverse_index(index):
return genome_length - 1 - index
def orf_update_dictionary(orf_start, orf_end, is_reverse, is_circular):
global orf_dictionary, orf_circular
actual_start = orf_start
actual_end = orf_end
if is_reverse:
actual_start = reverse_index(orf_start)
actual_end = reverse_index(orf_end)
if actual_end not in orf_dictionary:
orf_dictionary[actual_end] = actual_start
orf_circular[actual_end] = is_circular
print("New orf from", actual_start, "to", actual_end, "with length:", get_sequence_length(orf_start, orf_end, is_reverse, is_circular), is_reverse, is_circular)
else:
print("Checking whether", actual_start, "to", actual_end, "is longer than", orf_dictionary[actual_end], "to", actual_end)
old_seq_length = 0
if not is_reverse:
old_seq_length = get_sequence_length(orf_dictionary[orf_end], orf_end, is_reverse, orf_circular[orf_end])
else:
old_seq_length = get_sequence_length(orf_dictionary[actual_end], actual_end, is_reverse, orf_circular[actual_end])
if get_sequence_length(orf_start, orf_end, is_reverse, is_circular) > old_seq_length:
print(get_sequence_length(orf_start, orf_end, is_reverse, is_circular), ">", old_seq_length)
print("Replacing old index:", orf_dictionary[actual_end], "with", actual_start, "for end", actual_end)
#print(get_sequence_length(orf_start, orf_end, is_reverse, is_circular), ":", get_sequence_length(orf_dictionary[orf_end], orf_end, is_reverse, is_circular))
orf_dictionary[actual_end] = actual_start
else:
print("Reject")
#for d in orf_dictionary:
# print(orf_dictionary[d], ":", d)
def get_orfs_in_frame(genome, min_sequence_length, frame, is_reverse):
if not 0 <= frame <= 2:
raise Exception('Frame argument must be between 0 and 2')
start_indices = get_orf_start_codons(genome, frame)
if not start_indices:
return
stop_indices = get_orf_stop_codons(genome, frame)
    # start iteration in the start_indices from the first start index, which is bigger than the biggest stop_index
# add tuple(last_start, first_stop)
# add only tuple when the new tuple distance is bigger than the old one
# helper list for added stops
# stop at the start_index which is "last_start"
# tadda
#print("Frame:", frame, ",looking in frame:", circular_dna_strand_frame, "with stop", circular_first_stop)
# if there is a pending start codon triple without a stop codon triple
# add the sequence from the first start codon triple, which is bigger than the last stop codon triple, to the first stop codon triple as
# it is definitely longer than any sequence which starts at the beginning and stops at the same codon triple
# does only apply to circular DNA
circular_dna_strand_frame = (frame - (genome_length % 3)) % 3
circular_frame_stops = get_orf_stop_codons(genome, circular_dna_strand_frame)
if circular_frame_stops:
#print(frame, circular_dna_strand_frame)
circular_first_stop = get_orf_stop_codons(genome, circular_dna_strand_frame)[0]
last_stop = -1
if stop_indices:
last_stop = stop_indices[-1]
for start_index in start_indices:
if last_stop == -1 or start_index > last_stop:
#print(start_index, ":", circular_first_stop, ":", get_sequence_length(start_index, circular_first_stop, is_reverse, True))
if get_sequence_length(start_index, circular_first_stop, is_reverse, True) >= min_sequence_length:
orf_update_dictionary(start_index, circular_first_stop, is_reverse, True)
# else:
# orf_update_dictionary(start_index, reverse_index(circular_first_stop), is_reverse, True)
break
# HOW TO CHECK STOPS BETWEEN FRAMES
for start_index in start_indices:
for stop_index in stop_indices:
# find the next stop
if stop_index > start_index:
# if already added, meaning there is a longer sequence with the same stop codon triple
if get_sequence_length(start_index, stop_index, is_reverse, False) < min_sequence_length:
break
# else add this sequence
#if not is_reverse:
orf_update_dictionary(start_index, stop_index, is_reverse, False)
#else:
# orf_update_dictionary(reverse_index(start_index), reverse_index(stop_index), is_reverse, False)
break
def get_orf_start_codons(genome, frame):
if not 0 <= frame <= 2:
raise Exception('Frame argument must be between 0 and 2')
start_codons = ['ATG']
start_indices = []
genome_length = len(genome)
for i in range(frame, genome_length - 2 + frame, 3):
if genome[i : (i+3) % (genome_length + 1)] in start_codons:
start_indices.append(i)
return start_indices
def get_orf_stop_codons(genome, frame):
if not 0 <= frame <= 2:
raise Exception('Frame argument must be between 0 and 2')
stop_codons = ['TAA', 'TAG', 'TGA']
stop_indices = []
genome_length = len(genome)
for i in range(frame, genome_length - 2 + frame, 3):
if genome[i : (i+3) % (genome_length + 1)] in stop_codons:
stop_indices.append((i+2) % genome_length)
return stop_indices
def codons_to_aa(orf):
    # "% 3 is not 0" compared identity, not value; use != for correctness
    if len(orf) % 3 != 0:
        return None
    codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
    aa_seq = ''.join(codon_dict[c] for c in codons)
    return aa_seq
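# Illustrative translation (input invented): codons_to_aa('ATGAAATAA')
# returns 'MKSTOP\n', i.e. 'ATG' -> M, 'AAA' -> K and 'TAA' -> the stop
# marker; the callers above avoid the marker by slicing off the stop codon
# and stripping newlines before reporting peptides.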
def main():
get_orfs_of_size('TTTTTTCATTTTTTATTTCAT', 2)
if __name__ == "__main__":
main()
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
penalty = 0
for i in range(len(self.string1) + 1):
self.score_matrix[0][i] = penalty
penalty += self.gap_penalty
penalty = 0
for i in range(len(self.string2) + 1):
self.score_matrix[i][0] = penalty
penalty += self.gap_penalty
for i in range(len(self.string2)):
for j in range(len(self.string1)):
value1 = self.score_matrix[i][j] + self.substitution_matrix[self.string2[i]][self.string1[j]]
value2 = self.score_matrix[i][j+1] + self.gap_penalty
value3 = self.score_matrix[i+1][j] + self.gap_penalty
self.score_matrix[i+1][j+1] = max(value1, value2, value3)
def get_score(self, i, j):
if i <= 0 or j <= 0:
return 1
value = self.score_matrix[i][j]
value1 = self.score_matrix[i-1][j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]
value2 = self.score_matrix[i-1][j] + self.gap_penalty
value3 = self.score_matrix[i][j-1] + self.gap_penalty
values = []
if value == value1:
values.append(self.get_score(i-1,j-1))
if value == value2:
values.append(self.get_score(i-1,j))
if value == value3:
values.append(self.get_score(i,j-1))
return sum(values)
def get_alignment_helper(self, i, j, str1, str2, tuples):
if i <= 0 or j <= 0:
return [(str1, str2)]
value = self.score_matrix[i][j]
value1 = self.score_matrix[i-1][j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]
value2 = self.score_matrix[i-1][j] + self.gap_penalty
value3 = self.score_matrix[i][j-1] + self.gap_penalty
values = []
if value == value1: # or self.string1[j-1] == self.string2[i-1]:
values.append(self.get_alignment_helper(i-1,j-1, self.string1[j-1] + str1, self.string2[i-1] + str2, []))
if value == value2:
values.append(self.get_alignment_helper(i-1,j, '-' + str1, self.string2[i-1] + str2, []))
if value == value3:
values.append(self.get_alignment_helper(i,j-1, self.string1[j-1] + str1, '-' + str2, []))
for v in values:
for a in v:
tuples.append(a)
return tuples
def get_best_score(self):
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
i = len(self.string2)
j = len(self.string1)
return self.get_score(i, j)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.get_alignment_helper(len(self.string2), len(self.string1), '', '', [])
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
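

# Usage sketch (illustrative): with the BLOSUM-style `matrix` defined inside
# main() below and a hypothetical gap penalty of -6, one could run:
#   ga = GlobalAlignment('AVNCCEGQHI', 'ARNDEQ', -6, matrix)
#   print(ga.get_best_score(), ga.get_number_of_alignments())
# The strings and penalty are invented for demonstration.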
def main():
print("global alignment")
matrix = {
'A': {'A': 4, 'C': 0, 'B': -2, 'E': -1, 'D': -2, 'G': 0, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 0, 'W': -3, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'C': {'A': 0, 'C': 9, 'B': -3, 'E': -4, 'D': -3, 'G': -3, 'F': -2, 'I': -1, 'H': -3, 'K': -3, 'M': -1, 'L': -1, 'N': -3, 'Q': -3, 'P': -3, 'S': -1, 'R': -3, 'T': -1, 'W': -2, 'V': -1, 'Y': -2, 'X': -2, 'Z': -3},
'B': {'A': -2, 'C': -3, 'B': 4, 'E': 1, 'D': 4, 'G': -1, 'F': -3, 'I': -3, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 3, 'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'E': {'A': -1, 'C': -4, 'B': 1, 'E': 5, 'D': 2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0, 'Q': 2, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4},
'D': {'A': -2, 'C': -3, 'B': 4, 'E': 2, 'D': 6, 'G': -1, 'F': -3, 'I': -3, 'H': -1, 'K': -1, 'M': -3, 'L': -4, 'N': 1, 'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'G': {'A': 0, 'C': -3, 'B': -1, 'E': -2, 'D': -1, 'G': 6, 'F': -3, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0, 'Q': -2, 'P': -2, 'S': 0, 'R': -2, 'T': -2, 'W': -2, 'V': -3, 'Y': -3, 'X': -1, 'Z': -2},
'F': {'A': -2, 'C': -2, 'B': -3, 'E': -3, 'D': -3, 'G': -3, 'F': 6, 'I': 0, 'H': -1, 'K': -3, 'M': 0, 'L': 0, 'N': -3, 'Q': -3, 'P': -4, 'S': -2, 'R': -3, 'T': -2, 'W': 1, 'V': -1, 'Y': 3, 'X': -1, 'Z': -3},
'I': {'A': -1, 'C': -1, 'B': -3, 'E': -3, 'D': -3, 'G': -4, 'F': 0, 'I': 4, 'H': -3, 'K': -3, 'M': 1, 'L': 2, 'N': -3, 'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': -1, 'W': -3, 'V': 3, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2, 'F': -1, 'I': -3, 'H': 8, 'K': -1, 'M': -2, 'L': -3, 'N': 1, 'Q': 0, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -2, 'V': -3, 'Y': 2, 'X': -1, 'Z': 0},
'K': {'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2, 'F': -3, 'I': -3, 'H': -1, 'K': 5, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -1, 'S': 0, 'R': 2, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 1},
'M': {'A': -1, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 0, 'I': 1, 'H': -2, 'K': -1, 'M': 5, 'L': 2, 'N': -2, 'Q': 0, 'P': -2, 'S': -1, 'R': -1, 'T': -1, 'W': -1, 'V': 1, 'Y': -1, 'X': -1, 'Z': -1},
'L': {'A': -1, 'C': -1, 'B': -4, 'E': -3, 'D': -4, 'G': -4, 'F': 0, 'I': 2, 'H': -3, 'K': -2, 'M': 2, 'L': 4, 'N': -3, 'Q': -2, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'A': -2, 'C': -3, 'B': 3, 'E': 0, 'D': 1, 'G': 0, 'F': -3, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -3, 'N': 6, 'Q': 0, 'P': -2, 'S': 1, 'R': 0, 'T': 0, 'W': -4, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'Q': {'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': 0, 'L': -2, 'N': 0, 'Q': 5, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -2, 'V': -2, 'Y': -1, 'X': -1, 'Z': 3},
'P': {'A': -1, 'C': -3, 'B': -2, 'E': -1, 'D': -1, 'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -2, 'L': -3, 'N': -2, 'Q': -1, 'P': 7, 'S': -1, 'R': -2, 'T': -1, 'W': -4, 'V': -2, 'Y': -3, 'X': -2, 'Z': -1},
'S': {'A': 1, 'C': -1, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'F': -2, 'I': -2, 'H': -1, 'K': 0, 'M': -1, 'L': -2, 'N': 1, 'Q': 0, 'P': -1, 'S': 4, 'R': -1, 'T': 1, 'W': -3, 'V': -2, 'Y': -2, 'X': 0, 'Z': 0},
'R': {'A': -1, 'C': -3, 'B': -1, 'E': 0, 'D': -2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 2, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -2, 'S': -1, 'R': 5, 'T': -1, 'W': -3, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'T': {'A': 0, 'C': -1, 'B': -1, 'E': -1, 'D': -1, 'G': -2, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 5, 'W': -2, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'W': {'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -4, 'G': -2, 'F': 1, 'I': -3, 'H': -2, 'K': -3, 'M': -1, 'L': -2, 'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 11, 'V': -3, 'Y': 2, 'X': -2, 'Z': -3},
'V': {'A': 0, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': -1, 'I': 3, 'H': -3, 'K': -2, 'M': 1, 'L': 1, 'N': -3, 'Q': -2, 'P': -2, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -2},
'Y': {'A': -2, 'C': -2, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 3, 'I': -1, 'H': 2, 'K': -2, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -2, 'T': -2, 'W': 2, 'V': -1, 'Y': 7, 'X': -1, 'Z': -2},
'X': {'A': 0, 'C': -2, 'B': -1, 'E': -1, 'D': -1, 'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1, 'L': -1, 'N': -1, 'Q': -1, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -2, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'A': -1, 'C': -3, 'B': 1, 'E': 4, 'D': 1, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0, 'Q': 3, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4}
}
identity = {
'A': {'A': 1, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'R': {'A': 0, 'R': 1, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'N': {'A': 0, 'R': 0, 'N': 1, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'D': {'A': 0, 'R': 0, 'N': 0, 'D': 1, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'C': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 1, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'E': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 1, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'Q': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 1, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'G': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 1, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'H': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 1, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'I': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 1, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'L': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 1, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'K': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 1, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'M': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 1, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'F': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 1, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'P': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 1, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'S': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 1, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'T': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 1, 'W': 0, 'Y': 0, 'V': 0},
'W': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 1, 'Y': 0, 'V': 0},
'Y': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 1, 'V': 0},
'V': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 1}
}
alignment = GlobalAlignment('AVNCCEGQHI', 'ARNDEQ', -1, identity)
print("score: {0}, n_a: {1}".format(alignment.get_best_score(), alignment.get_number_of_alignments()))
print("alignments: {0}".format(alignment.get_alignments()))
return None
if __name__ == '__main__':
main()<file_sep>import numpy as np
from pathlib import Path
import time
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db = []
self.num_words = {}
self.words_to_seq = {}
self.lens = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db.append(sequence)
tmp_array = []
l = len(self.db)-1
for i in range(0, len(sequence)-2):
tmp_w = sequence[i:i + 3]
if tmp_w not in tmp_array:
tmp_array.append(tmp_w)
if tmp_w not in self.num_words:
self.num_words[tmp_w] = 1
self.words_to_seq[tmp_w] = [l]
else:
self.num_words[tmp_w]+= 1
self.words_to_seq[tmp_w].append(l)
self.lens.append(len(tmp_array))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [self.db[i] for i in self.words_to_seq[word]]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
s = len(self.db)
w = len(self.num_words)
        return (s, w, int(np.round(np.mean(self.lens))),
                int(np.round(np.mean(list(self.num_words.values())))))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.s = np.array(substitution_matrix)
self.words = []
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
self.words.append(i+j+k)
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
answer = []
if sequence is not None:
for i in range(0, len(sequence) - 2):
tmp_w = sequence[i:i + 3]
for word in self.words:
s = 0
for j in range(3):
s+= self.s[AA_TO_INT[word[j]],AA_TO_INT[tmp_w[j]]]
if s>=T:
answer.append(word)
if pssm is not None:
pssm = np.array(pssm)
for i in range(pssm.shape[0]-2):
for word in self.words:
s = 0
for j in range(3):
s += pssm[i+j][AA_TO_INT[word[j]]]
if s>=T:
answer.append(word)
return list(set(answer))
def better_get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
answer = []
if sequence is not None:
for i in range(0, len(sequence) - 2):
tmp_w = sequence[i:i + 3]
for word in self.words:
s = 0
for j in range(3):
s += self.s[AA_TO_INT[word[j]], AA_TO_INT[tmp_w[j]]]
if s >= T:
if [word,i] not in answer:
answer.append([word,i])
if pssm is not None:
pssm = np.array(pssm)
for i in range(pssm.shape[0] - 2):
for word in self.words:
s = 0
for j in range(3):
s += pssm[i + j][AA_TO_INT[word[j]]]
if s >= T:
if [word,i] not in answer:
answer.append([word,i])
return answer
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
#d['SEQWENCE'] = [(1, 2, 4, 13)]
candidates = self.better_get_words(sequence=query,pssm=pssm,T=T)
#seqs = []
if query is not None:
for i in candidates:
seqs = blast_db.get_sequences(i[0])
for seq in seqs:
finding_tmp = -1
index_t = seq.find(i[0])
tmp_arr = []
while index_t != -1:
tmp_arr.append(0)
finding_tmp += 1
s = 0
for j in range(3):
s += self.s[AA_TO_INT[seq[index_t+j]], AA_TO_INT[query[i[1]+j]]]
null_s = s
max_s = s
# moving right
j = 3
if s >= S:
tmp_arr[finding_tmp] = (i[1], index_t, 3,s)
while max_s<s+X and len(seq)>index_t+j and len(query)>i[1]+j:
s += self.s[AA_TO_INT[seq[index_t + j]], AA_TO_INT[query[i[1] + j]]]
if s> max_s:
max_s = s
tmp_arr[finding_tmp] = (i[1], index_t, j + 1, s)
j+=1
# moving left
if tmp_arr[finding_tmp] != 0:
saved_len = tmp_arr[finding_tmp][2]
s = max_s
else:
saved_len = 3
s = null_s
j = -1
while max_s<s+X and -1<index_t+j and -1<i[1]+j:
s += self.s[AA_TO_INT[seq[index_t + j]], AA_TO_INT[query[i[1] + j]]]
if s> max_s:
max_s = s
tmp_arr[finding_tmp] = (i[1]+j, index_t+j, saved_len-j, s)
j-=1
index_t = seq.find(i[0], index_t+1)
for tmp in tmp_arr:
if tmp != 0 and tmp[3]>=S:
if seq not in d:
d[seq] = [tmp]
elif tmp not in d[seq]:
d[seq].append(tmp)
else:
for i in candidates:
seqs = blast_db.get_sequences(i[0])
for seq in seqs:
finding_tmp = -1
index_t = seq.find(i[0])
tmp_arr = []
while index_t != -1:
tmp_arr.append(0)
finding_tmp += 1
s = 0
for j in range(3):
s += pssm[i[1]+j][AA_TO_INT[seq[index_t + j]]]
null_s = s
max_s = s
# moving right
j = 3
if s >= S:
tmp_arr[finding_tmp] = (i[1], index_t, 3, s)
while max_s < s + X and len(seq) > index_t + j and pssm.shape[0] > i[1] + j:
s += pssm[i[1]+j][AA_TO_INT[seq[index_t + j]]]
if s > max_s:
max_s = s
tmp_arr[finding_tmp] = (i[1], index_t, j + 1, s)
j += 1
# moving left
if tmp_arr[finding_tmp] != 0:
saved_len = tmp_arr[finding_tmp][2]
s = max_s
else:
saved_len = 3
s = null_s
j = -1
while max_s < s + X and -1 < index_t + j and -1 < i[1] + j:
s += pssm[i[1]+j][AA_TO_INT[seq[index_t + j]]]
if s > max_s:
max_s = s
tmp_arr[finding_tmp] = (i[1] + j, index_t + j, saved_len - j, s)
j -= 1
index_t = seq.find(i[0], index_t + 1)
for tmp in tmp_arr:
if tmp != 0 and tmp[3] >= S:
if seq not in d:
d[seq] = [tmp]
elif tmp not in d[seq]:
d[seq].append(tmp)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        # two-hit search is not implemented; the placeholder entry above
        # comes from the exercise template
        return d
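if __name__ == '__main__':
    # Minimal usage sketch, not part of the exercise tests. The scaled
    # identity matrix below is an assumption standing in for the BLOSUM62
    # matrix the real tests use.
    toy_matrix = 4 * np.eye(20, dtype=np.int64)
    db = BlastDb()
    db.add_sequence('MGPRARPAFLLL')
    db.add_sequence('MGPRAFLSLLPA')
    print(db.get_db_stats())
    blast = Blast(toy_matrix)
    print(blast.get_words(sequence='MGPRA', T=12))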
<file_sep># Download git repos by hand (test folder) OLD !!!!!
0. For each exercise, save the names of the students who participated in a .csv file.
   Save it as 'students_<#ex>.csv' (export the names in Artemis).
1. Create a folder for the zip files of the exercises (e.g. test).
2. In this folder, put all zip files which contain the code and name them <#ex>.zip.
3. Run unzipCode.sh.
4. Adjust the files_<#ex> variables in collectFiles.sh:
   just list the single files of the respective exercise i in files_i.
5. Run collectFiles.sh.
   After that, each folder contains a 'collected_files' folder which holds the
   single files of the code, renamed with the Matrikelnummer of each student.
6. Run checkCode.sh.
   The MOSS results can now be found in moss_results.txt and a list of the links in
   moss_links.txt.
7. Now mossum can be used to generate graphs of the students.
   Go to the folder moss/mossum/mossum and run: python mossum.py -p <PERCENTAGE> <URL1> <URL2><file_sep>##############
# Exercise 2.7
##############
# http://www.russelllab.org/aas/
# https://www.sigmaaldrich.com/life-science/metabolomics/learning-center/amino-acid-reference-chart.html
# https://www.chem.wisc.edu/deptfiles/genchem/netorial/modules/biomolecules/modules/protein1/aacidstr.htm
charged_pos = [ "R", "K",
"H" ]
charged_neg = ["E", "D"]
hydrophobic = [ "A", "I", "L", "M", "V",
"F", "W", "Y", "", "", "", ] # "V", "I", "L", "M", "F", "W", "C", "A", "Y", "H", "T", "S", "P", "G"
aromatic = ["F", "W", "Y", "H"]
polar = [ "N", "Q", "S", "T", "Y" ] # "R", "K", "D", "E", "N", "Q", "H", "A", "Y", "T", "S", "P", "G"
proline = [ "P" ]
sulfur = ["C", "M"]
acid = ["D", "E" ]
basic = ["R", "H", "K" ]
'''
Error in test_aromatic_polar:
AssertionError: Your code misclassified an amino acid that is not polar. assert not True + where True = any([False, False, False, False, False, True, ...])
'''
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in charged_pos
def isNegativelyCharged(aa):
return aa in charged_neg
def isHydrophobic(aa):
return aa in hydrophobic
def isAromatic(aa):
return aa in aromatic
def isPolar(aa):
return isCharged(aa) or aa in polar
def isProline(aa):
return aa in proline
def containsSulfur(aa):
return aa in sulfur
def isAcid(aa):
return aa in acid
def isBasic(aa):
return aa in basic
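if __name__ == '__main__':
    # quick sanity check (illustrative only): histidine is aromatic and
    # positively charged, and every charged residue counts as polar here
    assert isAromatic('H') and isPositivelyCharged('H') and isPolar('H')
    assert isHydrophobic('W') and not isCharged('W')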
<file_sep>##############
# Exercise 2.6
##############
import re
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def aa_freqs(self, aa_seq):
return Counter(aa_seq)
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return sum([len(seq) for _, seq in self.__sequences]) / self.get_counts()
def read_fasta(self, path):
with open(path) as f:
chunks = '\n'.join(f.readlines()).split('>')[1:]
for c in chunks:
header_body = c.split('\n')
header = header_body[0].replace('\n','')
body = '\n'.join(header_body[1:]).replace('\n','')
pattern = re.compile('[ARNDCEQGHILKMFPSTWYV]*')
body = re.search(pattern, body).group(0)
self.__sequences.append((header, body))
    def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
        freqs = Counter()
        for _, aa_seq in self.__sequences:
            freqs += self.aa_freqs(aa_seq)
        return freqs
def get_av_frequencies(self):
        # return number of occurrences normalized by length
freqs = self.get_abs_frequencies()
length = sum([len(seq) for _, seq in self.__sequences])
for aa in freqs:
freqs[aa] /= length
return freqs
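if __name__ == '__main__':
    # usage sketch; 'tests/tests.fasta' is a placeholder path and should
    # point to a local FASTA file
    dist = AADist('tests/tests.fasta')
    print(dist.get_counts(), dist.get_average_length())
    print(dist.get_av_frequencies())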
<file_sep>##############
# Exercise 2.7
##############
positive_charged = ('R', 'K', 'H')
negative_charged = ('D', 'E')
aromatic = ('W', 'Y', 'F', 'H')
polar = ('R', 'N', 'D', 'E', 'Q', 'H', 'K', 'S', 'T', 'Y')
sulphur = ('C', 'M')
acidic = ('D', 'E')
basic = ('R', 'K', 'H')
hydro_phobic = ('A', 'I', 'L', 'M', 'V', 'F', 'Y', 'W')
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return aa in positive_charged
def isNegativelyCharged(aa):
    return aa in negative_charged
def isHydrophobic(aa):
    return aa in hydro_phobic
def isAromatic(aa):
    return aa in aromatic
def isPolar(aa):
    return aa in polar
def isProline(aa):
    return aa == 'P'
def containsSulfur(aa):
    return aa in sulphur
def isAcid(aa):
    return aa in acidic
def isBasic(aa):
    return aa in basic
<file_sep>#!/bin/bash
# This file downloads the repositories and reorders the files into repos/ex/collected_files
files_0="main"
files_1="orffinder aa_props aa_dist"
files_2="exe2_swissprot exe2_pdb"
files_3="global_alignment local_alignment"
files_4="pssm"
files_5="blast"
exercises=(5)
#Clone Repos and templates
mkdir repos
mkdir ../templates
for ex in ${exercises[@]}; do
echo ${ex}
names=$(python get_names.py $ex)
IFS=',' read -r -a array <<< "$names"
for student in ${array[@]}; do
repo="https://$1:$2@repobruegge.in.tum.de/scm/pp1ss19exercise${ex}/pp1ss19exercise${ex}-exercise-${student}.git"
git clone ${repo} repos/${ex}/repos/${student}
done
done
#Collect files
cd repos
dirs=($(find . -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
for d in ${dirs[@]}; do
echo ${d}
cd ${d}
mkdir collected_files
dirs_in_ex=($(find repos -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
for ID in ${dirs_in_ex[@]}; do
echo ${ID}
case $d in
0)
file_list=${files_0}
;;
1)
file_list=${files_1}
;;
2)
file_list=${files_2}
;;
3)
file_list=${files_3}
;;
4)
file_list=${files_4}
;;
5)
file_list=${files_5}
;;
6)
file_list=${files_6}
;;
7)
file_list=${files_7}
;;
esac
for f in ${file_list}; do
if [ ! -d collected_files/${f} ]; then
mkdir collected_files/${f}
fi
done
for f in ${file_list}; do
echo $f
cp repos/${ID}/${f}.py collected_files/${f}/${ID}.py
done
done
cd ..
done
<file_sep>from itertools import product
from pathlib import Path
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.word_length = 3
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [s for s in self.sequences if word in s]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
TODO: optimize, maybe iterate over the sequences only once.
"""
number_of_sequences = len(self.sequences)
words_per_sequence = []
for s in self.sequences:
words = {s[i:i+self.word_length] for i in range(len(s)-self.word_length+1)}
words_per_sequence.append(words)
all_words = set().union(*words_per_sequence)
number_of_different_words = len(all_words)
average_words_per_sequence = round(sum(len(s) for s in words_per_sequence) / number_of_sequences)
        # average, over all distinct words, of the number of sequences
        # containing that word
        sequences_per_word = sum(
            sum(1 for words in words_per_sequence if w in words)
            for w in all_words)
        average_sequences_per_word = round(sequences_per_word / number_of_different_words)
return (number_of_sequences,
number_of_different_words,
average_words_per_sequence,
average_sequences_per_word)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
self.word_length = 3
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words = set()
all_words = self.generate_all_words()
if sequence:
sequence_words = [sequence[i:i+self.word_length] for i in range(len(sequence)-self.word_length+1)]
for w, s in product(all_words, sequence_words):
score = sum(self.substitution_matrix[AA_TO_INT[w[k]], AA_TO_INT[s[k]]] for k in range(len(w)))
if score >= T:
words.add(w)
else:
L = pssm.shape[0]
for w, i in product(all_words, range(L-self.word_length+1)):
score = sum(pssm[i+k, AA_TO_INT[w[k]]] for k in range(len(w)))
if score >= T:
words.add(w)
return list(words)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        # one-hit search is not implemented; the placeholder entry above
        # comes from the exercise template
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        # two-hit search is not implemented; the placeholder entry above
        # comes from the exercise template
        return d
# custom methods
def generate_all_words(self):
return [''.join(p) for p in product(ALPHABET, repeat=self.word_length)]
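    # Note: generate_all_words() enumerates 20**3 = 8000 candidate words
    # for the default word length of 3.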
if __name__ == "__main__":
import json
f = open('./tests/blast_test.json', 'r')
json_data = json.load(f)
db_sequences = json_data['db_sequences']
sub_matrix = np.array(json_data['sub_matrix'], dtype=np.int64)
query_pssm = np.array(json_data['query_pssm'], dtype=np.int64)
query_seq = json_data['query_seq']
blast_db = BlastDb()
for s in db_sequences:
blast_db.add_sequence(s)
blast = Blast(sub_matrix)
words = blast.get_words(pssm=query_pssm, T=11)
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
total = 0
for (header, sequence) in self.__sequences:
total += len(sequence)
return total/self.get_counts()
def read_fasta(self, path):
f = open(path, 'r')
# Save header
first_line = f.readline()
header = first_line[1:].strip()
result = []
# Read in sequence
aa_seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(';'):
if sequence_started:
# Remove trailing asterisk, if present
if aa_seq.endswith('*'):
aa_seq = aa_seq[:-1]
result.append((header, aa_seq))
header = line[1:].strip()
line = f.readline()
aa_seq = ""
else:
continue
sequence_started = True
aa_seq += line.strip()
# Remove trailing asterisk, if present
if aa_seq.endswith('*'):
aa_seq = aa_seq[:-1]
result.append((header, aa_seq))
self.__sequences = result
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
total = Counter()
for (header, sequence) in self.__sequences:
total = total + Counter(sequence)
return dict(total)
def get_av_frequencies(self):
        # return number of occurrences normalized by length
totalLength = 0
for (header, sequence) in self.__sequences:
totalLength += len(sequence)
abs_list = self.get_abs_frequencies()
return {key:abs_list[key]/totalLength for key in abs_list}<file_sep>##############
# Exercise 2.6
##############
from statistics import mean
from collections import Counter
from itertools import chain
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def add_sequence(self, seq):
# ignore '*' marker for distributions
self.__sequences.append(seq.replace('*',''))
def getSequences(self):
return self.__sequences
def read_fasta(self, path):
sequences = []
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.add_sequence(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.add_sequence(seq)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
len_distribution = [len(s) for s in self.__sequences]
return mean(len_distribution)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
counted = Counter(chain.from_iterable(self.__sequences))
return counted
def get_av_frequencies(self):
        # return number of occurrences normalized by length
counted = self.get_abs_frequencies()
total = sum(counted.values())
for key in counted:
counted[key] /= total
return counted
'''
Test Sequences:
[
'VLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHGKKVADALTNAVAHVDDMPNALSALSDLHAHKLRVDPVNFKLLSHCLLVTLAAHLPAEFTPAVHASLDKFLASVSTVLTSKYR',
'MGSSHHHHHHSSGLVPRGSHMELRVGNRYRLGRKIGSGSFGDIYLGTDIAAGEEVAIKLECVKTKHPQL<KEY>*',
'GPTGTGESKCPLMVKVLDAVRGSPAINVAV<KEY>TWEPFASGKTSESGELHGLTTEEQFVEGIYKVEIDTKSYWKALGISPFHEHAEVVFTANDSGPRRYTIAALLSPYSYST<KEY>*',
'<KEY>',
'<KEY>ADKLEFMHILTRVNRKVATEFESFSFDATFHAKKQIPCIVSMLTKELYFYH'
]
[
'VLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHGKKVADALTNAVAHVDDMPNALSALSDLHAHKLRVDPVNFKLLSHCLLVTLAAHLPAEFTPAVHASLDKFLASVSTVLTSKYR',
'MGSSHHHHHHSSGLVPRGSHMELRVGNRYRLGRKIGSGSFGDIYLGTDIAAGEEVAIKLECVKTKHPQLHIESKIYKMMQGGVGIPTIRWCGAEGDYNVMVMELLGPSLEDLFNFCSRKFSLKTVLLLADQMISRIEYIHSKNFIHRDVKPDNFLMGLGKKGNLVYIIDFGLAKKYRDARTHQHIPYRENKNLTGTARYASINTHLGIEQSRRDDLESL<KEY>GL<KEY>STPIEVLCKGYPSEFATYLNFCRSLRFDDKPDYSY<KEY>HRQGFSYDYVFDWNMLK*',
'GPTGTGESKCPLMVKVLDAVRGSPAINVAVHVFRKAADDTWEPFASGKTSESGELHGLTTEEQFVEGIYKVEIDTKSYWKALGISPFHEHAEVVFTANDSGPRRYTIAALLSPYSYSTTAVVTNPKE*',
'HHHHHHDRNRMKTLGRRDSSDDWEIPDGQITVGQRIGSGSFGTVYKGKWHGDVAVKMLNVTAPTPQQLQAFKNEVGVLRKTRHVNILLFMGYSTKPQLAIVTQWCEGSSLYHHLHIIETKFEMIKLIDIARQTAQGMDYLHAKSIIHRDLKSNNIFLHEDLTVKIGDFGLATEKSRWSGSHQFEQLSGSILWMAPEVIRMQDKNPYSFQSDVYAFGIVLYELMTGQLPYSNINNRDQIIFMVGRGYLSPDLSKVRSNCPKAMKRLMAECLKKKRDERPLFPQILASIELLARSLPKIHRS',
'MENTENSVDSKSIKNLEPKIIHGSESMDSGISLDNSYKMDYPE<KEY>'
]
'''
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
complementary = {
"T": "A",
"C": "G",
"A": "T",
"G": "C",
}
codon_map = {
'ATA': 'I',
'ATC': 'I',
'ATT': 'I',
'ATG': 'M', # start
'ACA': 'T',
'ACC': 'T',
'ACG': 'T',
'ACT': 'T',
'AAC': 'N',
'AAT': 'N',
'AAA': 'K',
'AAG': 'K',
'AGC': 'S',
'AGT': 'S',
'AGA': 'R',
'AGG': 'R',
'CTA': 'L',
'CTC': 'L',
'CTG': 'L',
'CTT': 'L',
'CCA': 'P',
'CCC': 'P',
'CCG': 'P',
'CCT': 'P',
'CAC': 'H',
'CAT': 'H',
'CAA': 'Q',
'CAG': 'Q',
'CGA': 'R',
'CGC': 'R',
'CGG': 'R',
'CGT': 'R',
'GTA': 'V',
'GTC': 'V',
'GTG': 'V',
'GTT': 'V',
'GCA': 'A',
'GCC': 'A',
'GCG': 'A',
'GCT': 'A',
'GAC': 'D',
'GAT': 'D',
'GAA': 'E',
'GAG': 'E',
'GGA': 'G',
'GGC': 'G',
'GGG': 'G',
'GGT': 'G',
'TCA': 'S',
'TCC': 'S',
'TCG': 'S',
'TCT': 'S',
'TTC': 'F',
'TTT': 'F',
'TTA': 'L',
'TTG': 'L',
'TAC': 'Y',
'TAT': 'Y',
'TAA': '.', # stop
'TAG': '.', # stop
'TGC': 'C',
'TGT': 'C',
'TGA': '.', # stop
'TGG': 'W'
}
def codons_to_aa(orf):
if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(map(lambda c: codon_map[c], codons))
return aa_seq
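# Example: codons_to_aa('ATGTAA') == 'M.' -- a start codon followed by a
# stop codon, which the table above renders as '.'.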
def list_orfs(genome, is_flipped=False):
l = len(genome)
orfs = []
for off in range(3):
orfs += [(off, genome[off:off + l - (l % 3)], is_flipped)]
l -= 1
return orfs
def list_all_orfs(genome):
left = genome.upper()
left += left
left += left
right = list(map(lambda i: complementary[i], left))
right.reverse()
right = "".join(right)
return list_orfs(left) + list_orfs(right, True)
def get_orfs(genome):
try:
index = 0
orfs = list_all_orfs(genome)
result = []
for orf_offset, o, is_flipped in orfs:
index += 1
aa = codons_to_aa(o)
is_first = True
has_first = False
found = []
if "." not in aa:
continue
off = 0
for split in aa.split("."):
if "M" in split:
beginning = split.index("M")
sequence = split[beginning:]
beginning += off
ending = off + len(split) + 1
beginning = beginning * 3 + orf_offset
ending = ending * 3 + orf_offset - 1
if beginning > len(genome):
break
ending %= len(genome)
if is_flipped:
beginning = len(genome) - beginning - 1
ending = len(genome) - ending - 1
has_first = True
if not is_first:
found.append((beginning, ending, sequence, is_flipped))
off += len(split) + 1
is_first = False
found = list(filter(lambda i: len(i[2]) > 33, found))
result.extend(found)
# print(result)
return result
    except Exception:
        # any failure while scanning (e.g. non-ACGT characters in the
        # genome) is reported as a TypeError, as the tests expect
        raise TypeError
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import re
amino_dict = {"AAA":"K", "AAC":"N", "AAG":"K", "AAT":"N",
"ACA":"T", "ACC":"T", "ACG":"T", "ACT":"T",
"AGA":"R", "AGC":"S", "AGG":"R", "AGT":"S",
"ATA":"I", "ATC":"I", "ATG":"M", "ATT":"I",
"CAA":"Q", "CAC":"H", "CAG":"Q", "CAT":"H",
"CCA":"P", "CCC":"P", "CCG":"P", "CCT":"P",
"CGA":"R", "CGC":"R", "CGG":"R", "CGT":"R",
"CTA":"L", "CTC":"L", "CTG":"L", "CTT":"L",
"GAA":"E", "GAC":"D", "GAG":"E", "GAT":"D",
"GCA":"A", "GCC":"A", "GCG":"A", "GCT":"A",
"GGA":"G", "GGC":"G", "GGG":"G", "GGT":"G",
"GTA":"V", "GTC":"V", "GTG":"V", "GTT":"V",
"TAA":"", "TAC":"Y", "TAG":"", "TAT":"Y",
"TCA":"S", "TCC":"S", "TCG":"S", "TCT":"S",
"TGA":"", "TGC":"C", "TGG":"W", "TGT":"C",
"TTA":"L", "TTC":"F", "TTG":"L", "TTT":"F"}
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
def codons_to_aa(dna_seq):
# assumes seq length is divisible by 3
result = []
for i in range(3, len(dna_seq)+1, 3):
result.append(amino_dict[dna_seq[i-3:i]])
return ''.join(result)
def complementary(text):
letters = []
for t in text:
if t == 'A' or t == 'G':
t = t.replace('A', 'T').replace('G', 'C')
elif t == 'T' or t == 'C':
t = t.replace('T', 'A').replace('C', 'G')
letters.append(t)
return ''.join(letters)
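# Example: complementary('ATGC') == 'TACG'; reversing that result yields
# the reverse-complement strand scanned below.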
def find_orf_matches(genome, reverse_flag=False):
result = []
gen_length = len(genome)
for i in range(3):
iter_genome = genome[i:] + genome[:i] + genome[i:] + genome[:i]
starts = [m.start() for m in re.finditer('ATG', iter_genome)]
ends = [m.start() for m in re.finditer('TAA|TAG|TGA', iter_genome)]
for start in starts:
no_end = True
for end in ends:
if not no_end:
continue
replaced = False
n = end - start + 3
if n % 3 == 0 and n <= gen_length and n>0:
no_end = False
if n > 34 * 3:
if reverse_flag:
tup = (gen_length - 1 - (start + i) % gen_length, gen_length - 1 -(end + 2 + i) % gen_length,
codons_to_aa(iter_genome[start:end + 3]), reverse_flag, n)
else:
tup = ((start + i) % gen_length, (end + 2 + i) % gen_length, codons_to_aa(iter_genome[start:end + 3]), reverse_flag, n)
for j, orf in enumerate(result):
if tup[1] == orf[1] and orf[-1] < tup[-1]:
result[j] = tup
replaced = True
elif tup[1] == orf[1] and orf[-1] >= tup[-1]:
replaced = True
if not replaced:
result.append(tup)
final_res = []
for res in result:
final_res.append(res[:-1])
return final_res
def get_orfs(genome):
genome = genome.upper()
if re.search('[^ATCG]', genome):
raise TypeError
result = find_orf_matches(genome)
rev = complementary(genome)[::-1]
result.extend(find_orf_matches(rev, reverse_flag=True))
return result
if __name__ == '__main__':
genome = read_genome('tests/genome.txt')
get_orfs(genome)
<file_sep>import json
import os
from functools import lru_cache
from pprint import pprint
import numpy as np
from tests.matrices import MATRICES
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
        self.direction_matrix = np.zeros((len(string2), len(string1)), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix[0, :] = np.arange(0, self.gap_penalty * (len(self.string1) + 1), self.gap_penalty)
self.score_matrix[:, 0] = np.arange(0, self.gap_penalty * (len(self.string2) + 1), self.gap_penalty)
for row, char2 in enumerate(self.string2):
for col, char1 in enumerate(self.string1):
# Match Score
                match_score = self.substitution_matrix[char2][char1] + \
self.score_matrix[row][col] # score-matrix is 1 indexed
# Delete from string1
delete_from_str1_score = self.gap_penalty + \
self.score_matrix[row][col + 1] # score-matrix is 1 indexed
# Delete from string2
delete_from_str2_score = self.gap_penalty + \
self.score_matrix[row + 1][col] # score-matrix is 1 indexed
max_score = max(match_score, delete_from_str1_score, delete_from_str2_score)
self.score_matrix[row + 1, col + 1] = max_score
# Up - 001 - 1
# UpLeft - 010 - 2
# Left - 100 - 4
direction_flag = 0
if max_score == delete_from_str1_score:
direction_flag = direction_flag | 1
if max_score == match_score:
direction_flag = direction_flag | 2
if max_score == delete_from_str2_score:
direction_flag = direction_flag | 4
self.direction_matrix[row, col] = direction_flag
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1, -1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.get_recursive_alignments(len(self.string2) - 1, len(self.string1) - 1)
@lru_cache(maxsize=128)
def get_recursive_alignments(self, row, col):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
result = []
if row == 0 and col == 0:
# Up - 001 - 1
if self.direction_matrix[row, col] & 1 == 1:
result.append(('-', self.string2[row]))
# UpLeft - 010 - 2
if self.direction_matrix[row, col] & 2 == 2:
result.append((self.string1[col], self.string2[row]))
# Left - 100 - 4
if self.direction_matrix[row, col] & 4 == 4:
result.append((self.string1[col], '-'))
return result
# Up - 001 - 1
if self.direction_matrix[row, col] & 1 == 1:
tmp_result = self.get_recursive_alignments(row - 1, col)
tmp_result = [(i[0] + '-', i[1] + self.string2[row]) for i in tmp_result]
result.extend(tmp_result)
# UpLeft - 010 - 2
if self.direction_matrix[row, col] & 2 == 2:
tmp_result = self.get_recursive_alignments(row - 1, col - 1)
tmp_result = [(i[0] + self.string1[col], i[1] + self.string2[row]) for i in tmp_result]
result.extend(tmp_result)
# Left - 100 - 4
if self.direction_matrix[row, col] & 4 == 4:
tmp_result = self.get_recursive_alignments(row, col - 1)
tmp_result = [(i[0] + self.string1[col], i[1] + '-') for i in tmp_result]
result.extend(tmp_result)
return result
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
if __name__ == '__main__':
relative_path = os.path.dirname(__file__)
with open(os.path.join(relative_path, 'tests/global_test.json')) as json_file:
json_data = json.load(json_file)
large_ga = GlobalAlignment(
*json_data['large']['strings'],
json_data['large']['gap_penalty'],
MATRICES[json_data['large']['matrix']]
)
pprint(large_ga.get_alignments())
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
positive=['R','H','K']
return aa in positive
def isNegativelyCharged(aa):
negative=['D','E']
return aa in negative
def isHydrophobic(aa):
hydrophobic=['A','C','I','L','M','V','F','W','P']
return aa in hydrophobic
#return not isPolar(aa)
def isAromatic(aa):
aromatic=['F','W','Y','H']
return aa in aromatic
def isPolar(aa):
polar=['R','N','D','E','Q','H','K','S','T','Y']
return aa in polar
def isProline(aa):
return aa == 'P'
def containsSulfur(aa):
return aa == 'M' or aa =='C'
def isAcid(aa):
return aa == 'D' or aa=='E'
def isBasic(aa):
return aa == 'K' or aa=='R' or aa=='H'
<file_sep>##############
# Exercise 2.7
##############
import re
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return re.match(r'^([RKH])$', aa)
def isNegativelyCharged(aa):
return re.match(r'^([DE])$', aa)
def isHydrophobic(aa):
return re.match(r'^([VILFWYMA])$', aa)
def isAromatic(aa):
return re.match(r'^([FWYH])$', aa)
def isPolar(aa):
return re.match(r'^([NQYSTDERKH])$', aa)
def isProline(aa):
return re.match(r'^([P])$', aa)
def containsSulfur(aa):
return re.match(r'^([CM])$', aa)
def isAcid(aa):
return re.match(r'^([DE])$', aa)
def isBasic(aa):
return re.match(r'^([RKH])$', aa)<file_sep>from collections import Counter
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
self.check_sequences()
    def check_sequences(self):
        # raise a TypeError unless the MSA is non-empty, every sequence
        # uses only valid characters, and all lengths match
        if len(self.sequences) == 0:
            raise TypeError()
        all_valid = all(all(c in ALPHABET for c in seq) for seq in self.sequences)
        same_length = all(len(seq) == len(self.sequences[0]) for seq in self.sequences)
        if not all_valid or not same_length:
            raise TypeError()
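    # Example: MSA(['SE-AN', 'SE-ES']) passes the check above, while
    # MSA(['SEAN', 'SE']) raises a TypeError because the lengths differ.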
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros(shape=(self.get_size()[1], (len(ALPHABET))))
        if bg_matrix is None:
bg_matrix = np.full((20, 20), 1 / (20 * 20))
else:
bg_matrix = np.asarray(bg_matrix)
bg_matrix_frequencies = bg_matrix.sum(axis=0)
if use_sequence_weights:
weights = self.get_sequence_weights()
else:
weights = np.ones(shape=self.get_size()[0])
for i in range(self.get_size()[1]):
characters = list(zip(*self.sequences))[i]
# counter = Counter(characters)
counter = Counter()
for index, value in enumerate(characters):
if counter.get(value) is None:
counter[value] = weights[index]
else:
counter[value] = counter.get(value) + weights[index]
for index, value in enumerate(characters):
pssm[i][AA_TO_INT[value]] = counter.get(value)
# redistribute gaps
if redistribute_gaps:
pssm[:, :-1] += np.outer(pssm[:, 20].T, bg_matrix_frequencies)
# cut gaps
pssm = pssm[:, :-1]
# pseudo counts
if add_pseudocounts:
N = self.get_number_of_observations()
alpha = N - 1
for idx, line in enumerate(pssm):
pseudo_weights = np.zeros_like(line)
for jdx, weight in enumerate(line):
pseudo_weights += bg_matrix[jdx] * weight / bg_matrix_frequencies[jdx]
pssm[idx] = (alpha * pssm[idx] + beta * pseudo_weights) / (alpha + beta)
# normalize
pssm /= np.sum(pssm, axis=1, keepdims=True)
# frequencies
pssm /= bg_matrix_frequencies
# logarize matrix
        pssm[pssm == 0] = 2 ** -10  # so that 2 * log2(2**-10) = -20 replaces -inf
pssm = 2 * np.log2(pssm)
        # remove rows for gap positions in the primary sequence
        indices = []
        for index, value in enumerate(self.sequences[0]):
            if value == "-":
                indices.append(index)
        pssm = np.delete(pssm, indices, 0)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
number_of_sequences = len(self.sequences)
len_of_MSA = len(self.sequences[0])
return (number_of_sequences, len_of_MSA)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-", "")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.get_size()[0], np.float64)
for i in range(self.get_size()[1]):
characters = list(zip(*self.sequences))[i]
counter = Counter(characters)
r = len(counter.keys())
if r > 1:
for index, character in enumerate(characters):
s = counter[character]
weights[index] += 1 / (r * s)
return weights
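    # Worked example: in a column 'A', 'A', 'C' there are r = 2 distinct
    # residues with s(A) = 2 and s(C) = 1, so each 'A' sequence gains
    # 1/(2*2) = 0.25 and the 'C' sequence gains 1/(2*1) = 0.5.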
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
l = self.get_size()[1]
sum_of_rs = 0
for i in range(self.get_size()[1]):
characters = list(zip(*self.sequences))[i]
counter = Counter(characters)
sum_of_rs += len(counter.keys())
num_obs = sum_of_rs / l
return np.sum(num_obs, dtype=np.float64)
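    # Worked example: for the sequences 'AA' and 'AC', the first column
    # has r = 1 and the second r = 2, giving N = (1 + 2) / 2 = 1.5.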
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
        # predecessor_matrix holds decimal representations of 3-digit binary
        # codes; from leftmost to rightmost bit: left, top and diagonal
        # predecessor. E.g. 0b100 means the cell's only predecessor is the
        # cell to its left.
        self.predecessor_matrix = np.zeros_like(self.score_matrix)
self.alignments = []
self.backtraces = []
self.align()
def choose_predecessor(self, row, col):
"""Chooses a cell's optimal predecessor/s.
Arguments:
row {int} -- row index
col {int} -- column index
Returns:
int -- Decimal representation of 3 digit predecessor binary code.
"""
pred_code = ['0', '0', '0']
        scores = [
            self.score_matrix[row][col-1] + self.gap_penalty,  # left
            self.score_matrix[row-1][col] + self.gap_penalty,  # top
            self.score_matrix[row-1][col-1]  # diagonal
            + self.substitution_matrix[self.string2[row-1]][self.string1[col-1]]
        ]
maximum_score = max(scores)
for i, s in enumerate(scores):
if s == maximum_score:
pred_code[i] = '1'
return (int("".join(pred_code), 2), maximum_score)
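    # Example: if the left and diagonal scores tie for the maximum, the
    # code is '101' -> 5, i.e. the cell has two optimal predecessors.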
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# Initialize score_matrix
for i in range(1, len(self.score_matrix[0])):
self.score_matrix[0][i] = self.score_matrix[0][i-1] + self.gap_penalty
self.predecessor_matrix[0][i] = int('100', 2)
for i in range(1, len(self.score_matrix)):
self.score_matrix[i][0] = self.score_matrix[i-1][0] + self.gap_penalty
self.predecessor_matrix[i][0] = int('010', 2)
for i in range(1, len(self.score_matrix)):
for j in range(1, len(self.score_matrix[0])):
self.predecessor_matrix[i][j], self.score_matrix[i][j] = self.choose_predecessor(i, j)
self.backtrace(len(self.predecessor_matrix)-1, len(self.predecessor_matrix[0])-1, [], self.backtraces)
self.alignments = self.trace_to_alignment(self.backtraces) # transform backtraces to alignments
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
"""
scores = []
for backtrace_index, backtrace in enumerate(self.backtraces):
acc_score = self.score_matrix[len(self.score_matrix)-1][len(self.score_matrix[0])-1]
for coord, _ in backtrace:
acc_score += self.score_matrix[coord[0]][coord[1]]
scores.append((acc_score, backtrace_index))
return max([score[0] for score in scores]) """
return self.score_matrix[len(self.score_matrix)-1][len(self.score_matrix[0])-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def backtrace(self, row, col, pred_track, backtraces):
# alignments = passed_backtraces[:]
pred_track_local = pred_track[:] # Copy by value, otherwise pred_track doesn't get "reset" at branching cell as values will keep being appended to the very same list every time.
if row == 0 and col == 0:
backtraces.append(pred_track)
return
for i, p in enumerate(list(format(self.predecessor_matrix[row][col], '03b'))):
if int(p):
if i == 0:
pred_track = pred_track_local[:]
pred_track.append(((row, col-1), '100'))
# pred_track[(row, col-1)] = '100'
self.backtrace(row, col-1, pred_track, backtraces)
elif i == 1:
pred_track = pred_track_local[:]
pred_track.append(((row-1, col), '010'))
# pred_track[(row-1, col)] = '010'
self.backtrace(row-1, col, pred_track, backtraces)
elif i == 2:
pred_track = pred_track_local[:]
pred_track.append(((row-1, col-1), '001'))
# pred_track[(row-1, col-1)] = '001'
self.backtrace(row-1, col-1, pred_track, backtraces)
        # no explicit return value needed: `backtraces` is mutated in place
def trace_to_alignment(self, backtraces):
alignments = []
for backtrace in backtraces:
alignment = [[], []]
for predecessor in backtrace:
if predecessor[1] == '100':
alignment[0].append(self.string1[predecessor[0][1]])
alignment[1].append('-')
elif predecessor[1] == '010':
alignment[0].append('-')
alignment[1].append(self.string2[predecessor[0][0]])
elif predecessor[1] == '001':
alignment[0].append(self.string1[predecessor[0][1]])
alignment[1].append(self.string2[predecessor[0][0]])
alignments.append(("".join(alignment[0][::-1]), "".join(alignment[1][::-1])))
return alignments
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
""" self.backtrace(len(self.predecessor_matrix)-1, len(self.predecessor_matrix[0])-1, [], self.backtraces)
self.alignments = self.trace_to_alignment(self.backtraces) # transform backtraces to alignments """
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return aa in ('R', 'K', 'H')
def isNegativelyCharged(aa):
    return aa in ('D', 'E')
def isPolar(aa):
    # all charged residues plus N, Q, S, T and Y count as polar
    return isCharged(aa) or aa in ('N', 'Q', 'S', 'T', 'Y')
def isHydrophobic(aa):
    return aa in ('A', 'V', 'I', 'L', 'M', 'F', 'Y', 'W')
def isAromatic(aa):
    # residues with an aromatic ring: F, W, Y and H
    return aa in ('F', 'W', 'Y', 'H')
def isProline(aa):
    return aa == 'P'
def containsSulfur(aa):
    return aa in ('M', 'C')
def isAcid(aa):
    return aa in ('D', 'E')
def isBasic(aa):
    return aa in ('R', 'H', 'K')<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in ["R","H","K"]
def isNegativelyCharged(aa):
return aa in ["D","E"]
def isHydrophobic(aa):
return aa in ["A","V","I","L","M","F","Y","W"]
def isAromatic(aa):
return aa in ["F","W","Y","H"]
def isPolar(aa):
return aa in ["N","Q","S","T","Y","R","D","E","H","K"]
def isProline(aa):
return aa=="P"
def containsSulfur(aa):
return aa in ["C","M","U"]
def isAcid(aa):
return aa in ["D","E"]
def isBasic(aa):
return aa in ["H","K","R"]
<file_sep>import numpy as np
from math import log
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if not isinstance(sequences,list):
raise TypeError("Input is not a list.")
if len(sequences) < 1:
raise TypeError("Input has to contain at least one sequence.")
self.seq_len = len(sequences[0])
self.num_seqs = len(sequences)
self.sequences = sequences
self.counts = []
self.rows_to_delete = []
count_el = {}
for el in ALPHABET:
count_el[el] = 0
        self.aa_matrix = np.zeros((self.num_seqs, self.seq_len), dtype=int)  # np.int was removed from NumPy
self.weight_matrix = np.zeros((self.num_seqs, self.seq_len), dtype=np.float64)
self.observed_aa = np.zeros(self.seq_len, dtype=np.float64)
for idx1, sequence in enumerate(sequences):
if len(sequence) != self.seq_len:
raise TypeError("All the sequences must have the same length.")
for idx2, aa in enumerate(sequence):
if aa not in ALPHABET:
raise TypeError("All sequences must contain only valid amino acids and gap characters.")
self.aa_matrix[idx1,idx2] = AA_TO_INT[aa]
self.primary_seq_with_gaps = sequences[0]
for idx, aa in enumerate(self.primary_seq_with_gaps):
if aa == "-":
self.rows_to_delete.append(idx)
self.primary_seq = sequences[0].replace("-","")
self.bg_matrix = None
self.add_pseudocounts = None
self.pseudocounts_matrix = np.zeros((self.seq_len, 20))
self.f_matrix = np.zeros((self.seq_len, 20))
self.f_matrix_weights = np.zeros((self.seq_len, 20))
self.my_bg_matrix = None
self.beta = 0
self.use_sequence_weights = None
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
self.my_bg_matrix = np.zeros((20,))
self.bg_matrix = bg_matrix
self.add_pseudocounts = add_pseudocounts
self.beta = beta
self.use_sequence_weights = use_sequence_weights
if bg_matrix is None:
for i in range(20):
self.my_bg_matrix[i] = 0.05
else:
for i in range(20):
self.my_bg_matrix[i] = np.sum(bg_matrix[i])
pssm = np.zeros((self.seq_len, 20))
inf = -20
self.pssm_aux = {}
for idx in range(self.seq_len):
self.pssm_aux[idx] = []
self.sequence_weights = self.get_sequence_weights()
self.sequence_sum = np.sum(self.sequence_weights)
self.g = np.zeros((self.seq_len, 20))
# 2*log((counts_elements[idx]/float(total_sum))/p, 2)
# Count observed
if add_pseudocounts:
self.pseudocounts()
for idx2 in range(self.seq_len):
unique_elements, counts_elements = np.unique(self.aa_matrix.transpose()[idx2], return_counts=True)
# print(unique_elements)
# print(counts_elements)
total_sum = 0
total_gaps = 0
for idx, el in enumerate(counts_elements):
if unique_elements[idx] != 20:
total_sum += el
else:
total_gaps += el
self.gap_sum=0
for idx, el in enumerate(self.aa_matrix.transpose()[idx2]):
if el == 20:
self.gap_sum += self.sequence_weights[idx]
# print(total_sum)
#print(total_gaps)
for i in range(20):
gaps = 0
p = self.my_bg_matrix[i]
if redistribute_gaps:
gaps = total_gaps * p
if i in unique_elements:
if use_sequence_weights:
total_weight = 0
for idz, sequence in enumerate(self.sequences):
if INT_TO_AA[i] == sequence[idx2]:
total_weight+=self.sequence_weights[idz]
if add_pseudocounts:
res_count = gaps + self.pseudocounts_matrix[idx2,i]
else:
res_count = gaps+total_weight
#print(res_count)
normalized = res_count/float(self.sequence_sum-self.gap_sum)
else:
if add_pseudocounts:
res_count = gaps + self.pseudocounts_matrix[idx2,i]
else:
res_count = gaps+counts_elements[np.where(unique_elements==i)]
normalized = res_count/float(total_sum)
background = normalized/p
result = 2*log(background, 2)
pssm[idx2][i] = result
else:
if add_pseudocounts:
res_count = gaps + self.pseudocounts_matrix[idx2,i]
else:
res_count = gaps
normalized = res_count/float(total_sum)
background = normalized/p
if background > 0:
result = 2*log(background, 2)
else:
result = -20
pssm[idx2][i] = result
pssm = np.delete(pssm,self.rows_to_delete,0)
return np.rint(pssm).astype(np.int64)
def pseudocounts(self):
alpha = self.get_number_of_observations() - 1
g_aux = []
for idx2 in range(self.seq_len):
g_aux.append([])
unique_elements, counts_elements = np.unique(self.aa_matrix.transpose()[idx2], return_counts=True)
# print(unique_elements)
# print(counts_elements)
total_sum = 0
#total_gaps = 0
for idx, el in enumerate(counts_elements):
if unique_elements[idx] != 20:
total_sum += el
# else:
# total_gaps += el
if not self.use_sequence_weights:
for idx, el in enumerate(unique_elements):
if el == 20:
continue
else:
g_aux[idx2].append((el,counts_elements[idx]/self.my_bg_matrix[el]))
self.f_matrix[idx2][el] = counts_elements[idx] * alpha
else:
for idx, el in enumerate(unique_elements):
if el == 20:
continue
total_weight = 0
for idz, sequence in enumerate(self.sequences):
                    if INT_TO_AA[el] == sequence[idx2]:
total_weight+=self.sequence_weights[idz]
g_aux[idx2].append((el,counts_elements[idx]/self.my_bg_matrix[el]))
self.f_matrix[idx2][el] = counts_elements[idx] * alpha
# print(g_aux)
# print(self.f_matrix)
for idx1 in range(self.seq_len):
for idx2 in range(20):
my_sum = 0
for idx3, el in enumerate(g_aux[idx1]):
if self.bg_matrix is None:
my_sum += el[1] * 0.0025
else:
my_sum += el[1] * self.bg_matrix[idx2][el[0]]
self.pseudocounts_matrix[idx1,idx2] = ((my_sum * self.beta) + self.f_matrix[idx1,idx2]) / (alpha + self.beta)
#print(self.pseudocounts_matrix)
# pseudocount[i] = ((alpha * freq[i]) + (beta*g[i])) / (alpha + beta)
# return pseudocount
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.num_seqs, self.seq_len)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.primary_seq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
        self.weights = np.zeros(self.num_seqs)
        # Per-column statistics are computed once per column here; the previous
        # version (marked FIXME) recomputed np.unique for every sequence.
        for idx2 in range(self.seq_len):
            unique_elements, counts_elements = np.unique(self.aa_matrix[:, idx2], return_counts=True)
            weight_r = len(unique_elements)
            self.observed_aa[idx2] = weight_r
            for idx1 in range(self.num_seqs):
                if weight_r == 1:  # Exclude positions where r = 1
                    self.weight_matrix[idx1, idx2] = 0.0
                else:
                    aa_index = np.where(unique_elements == self.aa_matrix[idx1][idx2])
                    weight_s = counts_elements[aa_index][0]
                    self.weight_matrix[idx1, idx2] = 1.0 / (weight_r * weight_s)
        for idx in range(self.num_seqs):
            self.weights[idx] = np.sum(self.weight_matrix[idx])
return self.weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = (1 / self.seq_len) * np.sum(self.observed_aa)
return num_obs.astype(np.float64)
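
# Minimal usage sketch; the sequences below are invented for illustration and
# are not part of the exercise data.
if __name__ == '__main__':
    msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
    print(msa.get_size())              # (4, 5)
    print(msa.get_primary_sequence())  # 'SEAN'
    print(msa.get_sequence_weights())
    print(msa.get_pssm())              # one row per non-gap primary position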
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.Polypeptide import *
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
mmcifParser = MMCIFParser()
self.structure = mmcifParser.get_structure(1, path) # Parse the structure once and re-use it in the functions below
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
chain_counter = 0
for m in self.structure:
chains = m.get_chains()
for c in chains:
chain_counter = chain_counter + 1
return chain_counter
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
for m in self.structure:
chains = m.get_chains()
for c in chains:
if c.id == chain_id:
sequence = ''
residues = c.get_residues()
for r in residues:
if r.get_resname() == 'HOH':
pass
#sequence += 'W'
else:
sequence += three_to_one(r.get_resname())
return sequence
return ''
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
for m in self.structure:
chains = m.get_chains()
for c in chains:
if c.id == chain_id:
n_water = 0
residues = c.get_residues()
for r in residues:
if r.get_resname() == 'HOH':
n_water = n_water + 1
return n_water
return 0
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
chain1 = None
chain2 = None
for m in self.structure:
chains = m.get_chains()
for c in chains:
if c.id == chain_id_1:
chain1 = c
if c.id == chain_id_2:
chain2 = c
        if chain1 is None or chain2 is None:
print('Chain error for id {0} or {1}.'.format(chain_id_1, chain_id_2))
return -1
residues1 = chain1.get_residues()
residues2 = chain2.get_residues()
residue1 = None
residue2 = None
index = 0
for r in residues1:
if index == index_1:
residue1 = r
break
index = index + 1
index = 0
for r in residues2:
if index == index_2:
residue2 = r
break
index = index + 1
        # Locate the C-alpha atom of each residue (the unused center-of-mass
        # scaffolding and debug prints were removed; behavior is unchanged)
        ca_coord1 = None
        for atom in residue1.get_atoms():
            if atom.get_fullname() == 'CA':
                ca_coord1 = atom.get_coord()
        ca_coord2 = None
        for atom in residue2.get_atoms():
            if atom.get_fullname() == 'CA':
                ca_coord2 = atom.get_coord()
        diff_vector = np.subtract(ca_coord1, ca_coord2)
        distance = np.sqrt(np.sum(diff_vector * diff_vector))
        return int(distance)
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
chain = None
for m in self.structure:
chains = m.get_chains()
for c in chains:
if c.id == chain_id:
chain = c
if chain == None:
print('Chain not found error for id {0}.'.format(chain_id))
return []
length = len(self.get_sequence(chain_id))
contact_map = []
for i in range(length):
contact_map.append([])
for j in range(length):
contact_map[i].append(self.get_ca_distance(chain_id, i, chain_id, j))
        result_map = np.array(contact_map, dtype=np.float32)
        result_map = result_map.astype(np.int64)  # return rounded (integer) values
        return result_map
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
chain = None
for m in self.structure:
chains = m.get_chains()
for c in chains:
if c.id == chain_id:
chain = c
if chain == None:
print('Chain not found error for id {0}.'.format(chain_id))
return []
length = len(self.get_sequence(chain_id))
b_factors = []
residue_count = 0
for r in chain.get_residues():
if r.get_resname() == 'HOH':
continue
atom_count = 0
bfactor = 0.0
for a in r.get_atoms():
value = a.get_bfactor()
if value == None:
bfactor = np.nan
break
bfactor = bfactor + value
atom_count = atom_count + 1
            if atom_count == 0 or np.isnan(bfactor):
                # `bfactor != np.nan` is always True, so NaN must be detected
                # with np.isnan (and atom_count == 0 would divide by zero)
                b_factors.append(np.nan)
            else:
                b_factors.append(bfactor / atom_count)
residue_count = residue_count + 1
mean = np.nanmean(b_factors)
std = np.sqrt(np.nanvar(b_factors))
for i in range(len(b_factors)):
b_factors[i] = (b_factors[i] - mean) / std
b_factors_normalized = np.array(b_factors, dtype=np.float32)
#print("shape: {0}".format(b_factors_normalized.shape))
        result = b_factors_normalized.astype(np.int64)
#print("mine: {0}".format(result))
return result# return rounded (integer) values
def main():
print('PDB parser class.')
parser = PDB_Parser('./tests/7ahl.cif')
print("number of chains: {0}, sequence: {1}, n_water: {2}, distance_same: {3}, distance_diff: {4}".format(parser.get_number_of_chains(), parser.get_sequence('A'), parser.get_number_of_water_molecules('A'), parser.get_ca_distance('A', 20, 'A', 55), parser.get_ca_distance('A', 121, 'E', 120)))
#print("map: {0}".format(parser.get_contact_map('A')))
#parser.get_number_of_chains()
#print("bfactors: {0}".format(parser.get_bfactors('A')))
return None
if __name__ == '__main__':
main()<file_sep>import numpy as np
from tests.matrices import MATRICES
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.max_score = 0
self.max_position = (0, 0)
self.residue_index = [[], []]
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros(
            (len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
        # First row and column stay 0 for local alignment (np.zeros already
        # provides this); seeding them with gap penalties was a Needleman-
        # Wunsch leftover and breaks Smith-Waterman's non-negativity.
for i in range(1, len(self.string2)+1):
for j in range(1, len(self.string1)+1):
self.score_matrix[i][j] = max(
self.score_matrix[i-1][j-1] +
self.substitution_matrix[self.string2[i-1]
][self.string1[j-1]],
self.score_matrix[i-1][j]+self.gap_penalty,
self.score_matrix[i][j-1]+self.gap_penalty,
0
)
if self.score_matrix[i][j] > self.max_score:
self.max_score = self.score_matrix[i][j]
self.max_position = (i, j)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
a1, a2 = self.get_alignment()
if a1 != "" and a2 != "":
return True
else:
return False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
alignment1, alignment2 = self.find_alignment(
"", "", self.max_position[0], self.max_position[1])
return (alignment1, alignment2)
def find_alignment(self, alignment1, alignment2, i, j):
if self.score_matrix[i][j] == 0:
return (alignment1, alignment2)
if i > 0 and j > 0 and self.score_matrix[i][j] == self.score_matrix[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
self.residue_index[0].append(j-1)
self.residue_index[1].append(i-1)
return self.find_alignment(self.string1[j-1]+alignment1, self.string2[i-1]+alignment2, i-1, j-1)
if i > 0 and self.score_matrix[i][j] == self.score_matrix[i-1][j]+self.gap_penalty:
self.residue_index[1].append(i-1)
return self.find_alignment('-'+alignment1, self.string2[i-1]+alignment2, i-1, j)
if j > 0 and self.score_matrix[i][j] == self.score_matrix[i][j-1]+self.gap_penalty:
self.residue_index[0].append(j-1)
return self.find_alignment(self.string1[j-1]+alignment1, '-'+alignment2, i, j-1)
return (alignment1, alignment2)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
if residue_index in self.residue_index[string_number-1]:
return True
return False
def main():
align = LocalAlignment("ARNDCEQGHI", "DDCEQHG", -6, MATRICES["blosum"])
print(align.get_alignment())
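    # Extra illustration: best local score and the 0-based indices of string1
    # residues that ended up aligned (residue_index is filled by get_alignment()).
    print(align.max_score)
    print([i for i in range(len("ARNDCEQGHI")) if align.is_residue_aligned(1, i)])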
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from pathlib import Path
from functools import reduce
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
@staticmethod
def _get_words(seq):
word_len = 3
return [seq[i: i + word_len] for i in range(len(seq) - word_len + 1)]
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [seq for seq in self.sequences if word in seq]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_seq = len(self.sequences)
words_list = [list(set(BlastDb._get_words(seq))) for seq in self.sequences]
word_to_count = {}
# collect the counts of unique words, so that each words do not have to be searched in each sequence again
for words in words_list:
for word in words:
if word in word_to_count:
word_to_count[word] += 1
else:
word_to_count[word] = 1
# the unique words found in those sequences
words = word_to_count.keys()
# unique words
# words = list(set(words_list_flattened))
num_words = len(words)
# avg. number of words per seq.
avg_words_per_seq = round(
sum(
# each list in "words_list" is the words generated by each sequence
# get the unique list of different words by putting it to a set,
# compute the length of the set to get num. of diff words in each sequence
[len(diff_words) for diff_words in words_list]
# then take the average of the above list
) / len(self.sequences)
)
# avg. number of seq. per word (???)
avg_seq_per_word = round(
sum(
# list of number of occurences of each word found
word_to_count.values()
# take the average of it
) / num_words
        )
res = (num_seq, num_words, avg_words_per_seq, avg_seq_per_word)
return res
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
self.list_of_words = Blast.list_of_words() # precompute all possiblities once
@staticmethod
def list_of_words():
# return the list of possible words, which should be 20 ^ 3 = 8000
return [a + b + c for a in ALPHABET for b in ALPHABET for c in ALPHABET]
def matching_score(self, word_a, word_b):
        assert len(word_a) == len(word_b) > 0  # constraint on words to query
return sum([self.substitution_matrix[AA_TO_INT[a]][AA_TO_INT[b]] for a,b in zip(word_a, word_b)])
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
if sequence:
res = [
query_word
for word in set(BlastDb._get_words(sequence)) # for each of the generated words in this sequence...
for query_word in self.list_of_words # and for each of the possible words...
if self.matching_score(word, query_word) >= T # filter out those with score >= T
]
res = list(set(res))
return res
# pssm here...
L = len(pssm)
res = [
query_word # we want the word under test
for query_word in self.list_of_words # amongst the list of possible words..
for ind in range(L - len(query_word)) # in all column (row) of the pssm within the window length(usually it's 3, but in case it is not...)
if sum([
# if in this window, the score provided by pssm is >= T
pssm[ind + l][AA_TO_INT[aa]] for l, aa in enumerate(query_word)
]) >= T
]
return sorted(list(set(res)))
@staticmethod
def permutate(list_a, list_b):
return [(a, b) for a in list_a for b in list_b]
@staticmethod
def flatten_list(nested_list):
return list(reduce(lambda res, sublist: [*res, *sublist], nested_list))
    def hsp(self, sequence, word, query_seq, X):
        # Not a @staticmethod: the nested expand() below uses self.matching_score.
# find all location of the word
W = len(word)
# all locations containing this word
locations = [i for i in range(len(sequence) - W) if sequence[i: i + W] == word]
query_locations = [i for i in range(len(query_seq) - W ) if query_seq[i: i + W] == word]
def expand(loc, query_loc):
'''
Small routine to be used in one particular direction (doesn't care here)
query sequence starts at query_loc, target sequence (in database) starts at loc
return di, and the highest score reached
'''
# initialze before expanding
score = self.matching_score(
sequence[loc: loc + W], # initial sequence under the word hit
query_seq[query_loc: query_loc + W] # and the corresponding location under the query
)
# the max score, initially is the word hit
max_score = score
# start proceeding to the right, then to the left
'''
loc + di ----->
|---------- len(sequence) - loc ------------|
sequence: ASASASASASASASA|A|SA)SASASASASAS
^
| The word
v
query: SSASASS|A|SA)AAAAAAASASSAS...
query_loc + di ----->
|---------- len(query_seq) - query_loc --------|
'''
for di in range(
min(len(sequence) - loc, len(query_seq) - query_loc)
):
                score += self.matching_score(sequence[loc + di], query_seq[query_loc + di])
                if max_score - score > X:
                    # X-drop: terminate once the score falls more than X below the best
                    break
if score > max_score:
max_score = score # update the maximum score if it preceeds the previous ones
# reached the tip of one of the sequence and it is still not the end...
seq_len = di # for now it is true
# now switch to the another direction
'''
di = -1, -2, -3, ......
<------ loc + di
|----loc-------|
|---seq_len----|
sequence: ASASASASASASASA|A|SA)SASASASAS||AS
^
| The word (ASA)
v
query: SSASASS|A|SA)AAAAAAASA||SSAS...
|---seq_len----|
<------ query_loc + di
|-------|
^
|
query_loc
'''
for di in map(lambda x: -x, range(min(loc, query_loc))):
score += self.matching_score(sequence[loc + di], query_seq[query_loc + di])
                if max_score - score > X:
break
if score > max_score:
max_score = score
seq_len -= di # since it is a minus
return seq_len, di, score
hsp_list = []
for loc, query_loc in Blast.permutate(locations, query_locations):
'''
loc: index of the starting of hsp
di: the iterating index
score: sum of the score from the start till (loc + di)
max_score: maxium_score reached.
X: the drop-off score the sequence needs to terminate on this direction
'''
# in one direction
seq_len, di, score = expand(loc, query_loc)
hsp_list.append((query_loc + di, loc + di, seq_len, score))
return hsp_list
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
# words the query is interested in
words = self.get_words(sequence = query, pssm = pssm, T = T)
# the sequences that contain such words
candidate_sequences = list(
set(Blast.flatten_list([blast_db.get_sequences(word) for word in words]))
)
d = {}
if query:
for target_sequence, word in Blast.permutate(candidate_sequences, words):
                hsp_list = self.hsp(target_sequence, word, query, X)
                hsp_list = [hsp for hsp in hsp_list if hsp[-1] >= S]  # keep those with high scores
                for hsp in hsp_list:
                    # Key by target sequence (per the docstring) and avoid duplicate HSPs
                    if hsp not in d.setdefault(target_sequence, []):
                        d[target_sequence].append(hsp)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
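
# Minimal usage sketch for BlastDb; the sequences are invented for
# illustration and are not real protein data.
if __name__ == '__main__':
    db = BlastDb()
    db.add_sequence('MGPRARPAFLLLMLLQTAVL')
    db.add_sequence('MGARPAFLLLMLLQTAVLQA')
    print(db.get_sequences('RPA'))  # both sequences contain 'RPA'
    print(db.get_db_stats())        # (num seqs, num words, words/seq, seqs/word)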
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db_sequences = []
self.db_words = set()
self.wordToSequence = {}
self.sequenceToWordCount = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db_sequences.append(sequence)
seq_words = set() #set of different words in the new sequence
for idx in range(0,len(sequence)-2): #for all words in the new sequence
word = sequence[idx:idx+3]
self.db_words.add(word) #add the word to db_words if it isn't already there
word_multiple_time_flag = False
if word in seq_words:
word_multiple_time_flag = True
seq_words.add(word) #update the list of words in the new sequence
#update the list of sequences which the word exists
if word not in self.wordToSequence:
self.wordToSequence[word] = []
if not word_multiple_time_flag:
self.wordToSequence[word].append(len(self.db_sequences)-1)
self.sequenceToWordCount.append(len(seq_words))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
res =[self.db_sequences[i] for i in self.wordToSequence[word]]
return res
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
numSeq = len(self.db_sequences)
numWords = len(self.db_words)
avgWordPerSeq = int(np.rint(np.mean(self.sequenceToWordCount)))
tot = 0
cnt = len(list(self.wordToSequence.values()))
for l in list(self.wordToSequence.values()):
tot += len(l)
avgSeqPerWord = int(np.rint(tot/cnt))
return (numSeq, numWords, avgWordPerSeq, avgSeqPerWord)
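
# Quick sanity sketch for this indexed BlastDb variant (invented sequences);
# it should report the same statistics as a naive scan-based implementation.
if __name__ == '__main__':
    _db = BlastDb()
    _db.add_sequence('MGPRARPAFLLLMLLQTAVL')
    _db.add_sequence('MGARPAFLLLMLLQTAVLQA')
    print(_db.get_sequences('RPA'))
    print(_db.get_db_stats())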
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix = substitution_matrix
def get_word_score(self,word1,word2,T):
sum = 0
for i in range(0,3):
sum += self.sub_matrix[AA_TO_INT[word1[i]]][AA_TO_INT[word2[i]]]
return sum >= T
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
res_set = set()
#seq_words = set()
        if sequence is not None:  # query sequence is given
for idx in range(0,len(sequence)-2): #for all word positions in the query sequence
query_word = sequence[idx:idx+3]
#permute all word combinations
for aa1 in ALPHABET:
for aa2 in ALPHABET:
for aa3 in ALPHABET:
word = aa1+aa2+aa3
if self.get_word_score(query_word,word,T): #score is at least T
res_set.add(word)
        elif pssm is not None:  # PSSM is given
pssm_len = pssm.shape[0]
for idx in range(0,pssm_len-2): #for every word position in PSSM
for aa1 in ALPHABET:
for aa2 in ALPHABET:
for aa3 in ALPHABET:
if pssm[idx][AA_TO_INT[aa1]]+pssm[idx+1][AA_TO_INT[aa2]]+pssm[idx+2][AA_TO_INT[aa3]] >= T:
word = aa1 + aa2 + aa3
res_set.add(word)
return list(res_set)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
#d = dict()
#d['SEQWENCE'] = [(1, 2, 4, 13)]
        words = self.get_words(sequence=query, pssm=pssm, T=T)
#if not sequence == None: #query sequence is given
# for word in words: #for every word
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
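
# Hypothetical smoke test for get_words; an identity-style matrix stands in
# for a real 20x20 substitution matrix (values invented; result order may
# vary since it comes from a set).
if __name__ == '__main__':
    sub = [[4 if i == j else 0 for j in range(20)] for i in range(20)]
    blast = Blast(sub)
    print(blast.get_words(sequence='MGPR', T=12))  # ['MGP', 'GPR'] in some order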
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
alpha_widout_gap='ACDEFGHIKLMNPQRSTVWY'
newmap_aa2int = {aa:index for index, aa in enumerate(alpha_widout_gap)}
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
if len(self.sequences)==0:
raise TypeError()
firstlen=len(self.sequences[0])
for seq in self.sequences:
if len(seq)!=firstlen:
raise TypeError()
for seq in self.sequences:
if not all(list(map(lambda x: x in ALPHABET,seq))):
raise TypeError()
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
bg_array=None
        if bg_matrix is not None:  # `!= None` is ambiguous for numpy arrays
rowsum=np.sum(bg_matrix,axis=0)
bg_array=np.zeros(20)
for i in range(20):
bg_array[i] = rowsum[newmap_aa2int[INT_TO_AA[i]]]
row_index = []
for i,c in enumerate(self.sequences[0]):
if (c != '-'):
row_index.append(i)
pssm = np.zeros(shape=(len(row_index),21))
if(use_sequence_weights):
wt=self.get_sequence_weights()
for i,seq in enumerate(self.sequences):
for j in range(len(row_index)):
pssm[j][AA_TO_INT[seq[row_index[j]]]]=wt[i]+pssm[j][AA_TO_INT[seq[row_index[j]]]]
else:
for seq in self.sequences:
for j in range(len(row_index)):
pssm[j][AA_TO_INT[seq[row_index[j]]]]=1+pssm[j][AA_TO_INT[seq[row_index[j]]]]
if(redistribute_gaps):
            if bg_matrix is None:
for i in range(len(row_index)):
gaps=pssm[i][20]
if (gaps>0):
for j in range(20):
pssm[i][j]= pssm[i][j]+(gaps*0.05)
else:
for i in range(len(row_index)):
gaps=pssm[i][20]
if (gaps>0):
for j in range(20):
pssm[i][j]= pssm[i][j]+(gaps*bg_array[j])
pssm = pssm[:,0:-1]
if(add_pseudocounts):
            if bg_matrix is None:
alpha=self.get_number_of_observations()-1
counts = np.zeros(shape=(len(row_index),20))
for i in range(len(row_index)):
for j in range(20):
                        total = 0  # avoid shadowing the builtin sum()
                        for pssm_col in range(20):
                            total = total + (pssm[i][pssm_col] * 0.0025) / 0.05
                        counts[i][j] = total
pssm1= np.zeros(shape=(counts.shape[0],counts.shape[1]))
for i in range(pssm1.shape[0]):
pssm1[i] = (alpha*pssm[i] + beta*counts[i])
pssm1[i]/=(alpha+beta)
pssm = pssm1
else:
alpha=self.get_number_of_observations()-1
counts = np.zeros(shape=(len(row_index),20))
for i in range(len(row_index)):
for j in range(20):
                        total = 0  # avoid shadowing the builtin sum()
                        for pssm_col in range(20):
                            aaa = INT_TO_AA[j]
                            bbb = INT_TO_AA[pssm_col]
                            nume = bg_matrix[newmap_aa2int[aaa]][newmap_aa2int[bbb]]
                            denom = bg_array[pssm_col]
                            total = total + (pssm[i][pssm_col] * nume) / denom
                        counts[i][j] = total
pssm1= np.zeros(shape=(counts.shape[0],counts.shape[1]))
for i in range(pssm1.shape[0]):
pssm1[i] = (alpha*pssm[i] + beta*counts[i])
pssm1[i]/=(alpha+beta)
pssm = pssm1
pssm = pssm/np.sum(pssm,axis=1)[:,None]
        if bg_matrix is None:
pssm=pssm/0.05
else:
for i in range(len(row_index)):
for j in range(20):
pssm[i][j]/=bg_array[j]
pssm =2*np.log2(pssm)
        pssm[pssm == -np.inf] = -20  # np.infty is removed in newer NumPy
pssm=np.rint(pssm).astype(np.int64)
return pssm
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
# return 'NOPE'
return self.sequences[0].replace("-","")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
pssm=np.zeros(shape=(len(self.sequences[0]), 21))
for seq in self.sequences:
for i in range(len(self.sequences[0])):
pssm[i][AA_TO_INT[seq[i]]]=1+pssm[i][AA_TO_INT[seq[i]]]
pssm = np.rint(pssm).astype(np.int64)
tmp = np.count_nonzero(pssm,axis=1)
matter_index=np.nonzero(pssm)
matter_map={}
for i in range(len(np.nonzero(pssm)[0])):
if matter_index[0][i] not in matter_map:
matter_map[matter_index[0][i]]={INT_TO_AA[matter_index[1][i]]:pssm[matter_index[0][i]][matter_index[1][i]]}
else:
ttt = matter_map[matter_index[0][i]]
ttt[INT_TO_AA[matter_index[1][i]]] = pssm[matter_index[0][i]][matter_index[1][i]]
mat=np.zeros(shape=(len(self.sequences[0]),len(self.sequences)))
################################# final calculate
for i in range(0,len(self.sequences[0])):
for j in range(0,len(self.sequences)):
if tmp[i] != 1:
tryp = self.sequences[j][i]
mat[i][j] = 1/(tmp[i]*matter_map[i][tryp])
wt=np.sum(mat,axis=0).astype(np.float64)
return wt
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
pssm = np.zeros(shape=(len(self.sequences[0]), 21))
for seq in self.sequences:
for i in range(len(self.sequences[0])):
pssm[i][AA_TO_INT[seq[i]]]=1+pssm[i][AA_TO_INT[seq[i]]]
pssm = np.rint(pssm).astype(np.int64)
tmp=np.count_nonzero(pssm, axis=1)
obs=np.sum(tmp)/len(self.sequences[0])
return np.float64(obs)
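
# Worked example for the position-based sequence weights above (toy input):
# in column 0 of ["AAA", "AAC", "CCC"], r = 2 distinct residues with counts
# A:2 and C:1, so each 'A' row receives 1/(2*2) = 0.25 and the 'C' row 1/(2*1) = 0.5.
if __name__ == '__main__':
    msa = MSA(["AAA", "AAC", "CCC"])
    print(msa.get_sequence_weights())
    print(msa.get_number_of_observations())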
<file_sep>import numpy as np
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
def is_msa_valid(seqs):
if not isinstance(seqs, list) or len(seqs) == 0:
return False
seq_len = len(seqs[0])
for seq in seqs:
if not isinstance(seq, str):
return False
if len(seq) != seq_len or len(seq) == 0:
return False
for char in seq:
if char not in ALPHABET:
return False
return True
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.seqs = sequences
if not is_msa_valid(self.seqs):
raise TypeError
self.seq_count = len(self.seqs)
self.seq_len = len(self.seqs[0])
# compute sequence weights
weights = np.zeros(self.seq_count)
observations_sum = 0
self.col_aa_count = []
self.seq_inv = []
for i in range(self.seq_len):
aa_list = []
for seq in self.seqs:
aa_list.append(seq[i])
r = len(set(aa_list))
self.seq_inv.append(aa_list)
# compute independent observations using r
observations_sum += r
aa_counter = Counter(aa_list)
if aa_list[0] != '-':
self.col_aa_count.append(aa_counter)
if r == 1:
continue
new_weights = np.zeros(self.seq_count)
for j, aa in enumerate(aa_list):
new_weights[j] = 1 / (aa_counter[aa] * r)
weights = weights + new_weights
self.weights = weights
self.observations = observations_sum / self.seq_len
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
L = len(self.get_primary_sequence())
pssm = np.zeros((L, 20))
alpha = self.get_number_of_observations() - 1
        if bg_matrix is None:  # `not bg_matrix` is ambiguous for numpy arrays
            bg_matrix = np.ones((20, 20)) * (1 / 400)
i = -1
for col in self.seq_inv:
# ignore gaps in primary sequence
if col[0] == "-":
continue
i += 1
new_pssm = np.zeros(21) # including gap
# 2. Count observed aa and gaps
if use_sequence_weights:
# Compute f from weights
for aa in set(col):
# find indices
idx = [j for j, x in enumerate(col) if x == aa]
# sum weights
new_pssm[AA_TO_INT[aa]] = sum(self.weights[idx])
else:
aa_counter = self.col_aa_count[i]
for aa in aa_counter:
new_pssm[AA_TO_INT[aa]] = aa_counter[aa]
# Discard gap element
gap = new_pssm[-1]
new_pssm = new_pssm[0:-1]
# 3. Redistribute gaps
if redistribute_gaps:
new_pssm += gap * np.sum(bg_matrix, axis=1)
# 4. Pseudocounts
if add_pseudocounts:
g_i = np.zeros(20)
for a in range(20):
g_i_a = 0
for j in range(20):
# f is already computed
f_i_j = new_pssm[j]
# g_i_a = \sum_{j} f_i_j * q_j_a / P_j
g_i_a += f_i_j * bg_matrix[j][a] / np.sum(bg_matrix[j])
g_i[a] = g_i_a
f_i = new_pssm
F_i = (alpha * f_i + beta * g_i) / (alpha + beta)
new_pssm = F_i
# 5. Normalize
new_pssm /= sum(new_pssm)
# 6. Background Frequencies
for aa in range(20):
new_pssm[aa] /= sum(bg_matrix[aa])
# 7. Log
new_pssm = 2 * np.log2(new_pssm)
# Replace -inf with -20
new_pssm[new_pssm == -np.inf] = -20
pssm[i] = new_pssm
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return self.seq_count, self.seq_len
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.seqs[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.observations
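
# Minimal usage sketch with invented sequences (defaults: uniform background
# frequencies, no weights, no gap redistribution, no pseudocounts):
if __name__ == '__main__':
    msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
    print(msa.get_size())  # (4, 5)
    print(msa.get_pssm())  # shape (4, 20): one row per non-gap primary position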
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
from Bio.PDB.Polypeptide import is_aa
from Bio.PDB.Polypeptide import three_to_one
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER= MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
parser = MMCIFParser()
self.structure =parser.get_structure(structure_id='7ahl', filename=path)[0] # Parse the structure once and re-use it in the functions below
# self.structure
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
        return len(self.structure)
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
st=""
for residue in self.structure[chain_id].child_list:
if is_aa(residue.resname, standard=True):
st+=three_to_one(residue.resname)
return st
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
water=0
for residue in self.structure[chain_id].child_list:
if residue.resname=='HOH':
water+=1
return water
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
r1=self.structure[chain_id_1][index_1]
r2=self.structure[chain_id_2][index_2]
c1=[]
c2=[]
for atom in r1:
if(atom.name=="CA"):
c1=atom.coord
break
for atom in r2:
if(atom.name=="CA"):
c2=atom.coord
break
ca_distance = np.linalg.norm(c1-c2)
return int( ca_distance )
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
        # Assumes (like the original loop) that water molecules come after the
        # amino acids in child_list, so the first `length` residues are amino acids.
        length = len(self.structure[chain_id]) - self.get_number_of_water_molecules(chain_id)
        contact_map = np.zeros((length, length), dtype=np.float32)
        # Collect each residue's C-alpha coordinate once instead of re-scanning
        # the chain for every residue pair.
        ca_coords = []
        for residue in self.structure[chain_id].child_list[:length]:
            for atom in residue:
                if atom.name == "CA":
                    ca_coords.append(atom.coord)
                    break
        for ii in range(length):
            for jj in range(length):
                contact_map[ii][jj] = int(np.linalg.norm(ca_coords[ii] - ca_coords[jj]))
        return contact_map.astype(np.int64)  # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
        protein_seq = self.get_sequence(chain_id)
        length = len(protein_seq)
        arr = np.zeros(length, dtype=np.float32)
        ctr = 0
        chain = self.structure[chain_id]
        for residue in chain.get_list():
            if is_aa(residue.get_resname(), standard=True):
                val = 0.0
                val_cnt = 0
                for atom in residue:
                    val_cnt += 1
                    val += atom.get_bfactor()
                # Insert np.nan when no B-factor is available for a residue
                # (e.g. the residue was not resolved).
                arr[ctr] = val / val_cnt if val_cnt > 0 else np.nan
                ctr += 1
        # Standard scores (zero mean, unit variance), nan-aware as required
        # by the docstring above.
        arr = (arr - np.nanmean(arr)) / np.nanstd(arr)
        b_factors = np.array(arr, dtype=np.float32)
        return b_factors.astype(np.int)  # return rounded (integer) values
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()
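# Minimal usage sketch (illustrative only; the parser class defined above is
# constructed elsewhere in this repo -- the names below are assumed):
#   parser = PDBParserExercise('tests/example.pdb')  # hypothetical name/path
#   parser.get_sequence('A')
#   parser.get_ca_distance('A', 1, 'A', 10)
#   parser.get_bfactors('A')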
<file_sep>import numpy as np
import itertools
from pathlib import Path
import json
import re
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
result = []
for seq in self.db:
if word in seq:
result.append(seq)
return result
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        all_words = itertools.product(list(ALPHABET), repeat=3)
        # Collect every word that occurs in at least one sequence.
        words_in_all_seqs = []
        for word in all_words:
            word = ''.join(word)
            for seq in self.db:
                if word in seq:
                    words_in_all_seqs.append(word)
                    break
# count words for any seq
num_words_by_seq = []
for seq in self.db:
counter = 0
for word in words_in_all_seqs:
if word in seq:
counter += 1
num_words_by_seq.append(counter)
# count sequences for word
num_seqs_by_word = []
for word in words_in_all_seqs:
counter = 0
for seq in self.db:
if word in seq:
counter += 1
num_seqs_by_word.append(counter)
num_seqs_in_db = len(self.db)
num_words_in_db_seqs = len(words_in_all_seqs)
average_diff_words_by_seq = int(np.round(np.mean(num_words_by_seq)))
avg_num_seq_by_words = int(np.round(np.mean(num_seqs_by_word)))
return (num_seqs_in_db, num_words_in_db_seqs, average_diff_words_by_seq, avg_num_seq_by_words)
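# Usage sketch for the database above (values illustrative only):
#   db = BlastDb()
#   db.add_sequence('MGARASVLSG')
#   db.get_sequences('ARA')   # -> ['MGARASVLSG']
#   db.get_db_stats()         # -> (num_seqs, num_words, avg_words, avg_seqs)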
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.mat = substitution_matrix
def get_word_score(self, word, test_seq):
score = 0
for w, t in zip(word, test_seq):
score += self.mat[w,t]
return score
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
int_ALPHABET = [AA_TO_INT[s] for s in ALPHABET]
if sequence is not None:
result = []
int_seq = []
for s in sequence:
int_seq.append(AA_TO_INT[s])
for i in range(len(int_seq)-2):
test_seq = int_seq[i:i+3]
for w in itertools.product(int_ALPHABET, repeat=3):
score = self.get_word_score(w, test_seq)
if score >= T:
result.append(''.join([INT_TO_AA[c] for c in w]))
result = list(np.unique(result))
if pssm is not None:
result = []
for i in range(pssm.shape[0]-2):
test_pssm = pssm[i:i+3, :]
for w in itertools.product(int_ALPHABET, repeat=3):
score = 0
for k in range(3):
score += test_pssm[k, w[k]]
if score >= T:
result.append(''.join([INT_TO_AA[c] for c in w]))
result = list(np.unique(result))
return result
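    # Example neighborhood (assuming BLOSUM62 scores): for the query word
    # 'RGD' with T = 13, 'RGD' scores 5+6+6 = 17, 'KGD' scores 2+6+6 = 14 and
    # 'RGE' scores 5+6+2 = 13, so all three pass the threshold.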
def get_words_plus(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
int_ALPHABET = [AA_TO_INT[s] for s in ALPHABET]
result = []
if sequence is not None:
int_seq = []
for s in sequence:
int_seq.append(AA_TO_INT[s])
for i in range(len(int_seq)-2):
test_seq = int_seq[i:i+3]
for w in itertools.product(int_ALPHABET, repeat=3):
score = self.get_word_score(w, test_seq)
if score >= T:
result.append([w, i, score])
if pssm is not None:
for i in range(pssm.shape[0]-2):
test_pssm = pssm[i:i+3, :]
for w in itertools.product(int_ALPHABET, repeat=3):
score = 0
for k in range(3):
score += test_pssm[k, w[k]]
if score >= T:
result.append([w, i, score])
return result
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
words_plus = self.get_words_plus(sequence=query, pssm=pssm, T=T)
result_dict = {}
for w in words_plus:
word = w[0]
str_word = ''.join([INT_TO_AA[c] for c in w[0]])
query_start_index = w[1]
curr_score = w[2]
for seq in blast_db.get_sequences(str_word):
target_starts = [m.start() for m in re.finditer(str_word, seq)]
result_tuples = []
for target_start in target_starts:
result_tuple = self.get_score_and_alignment(seq, target_start, curr_score, query_start_index, X, query=query, pssm=pssm)
if result_tuple[-1] >= S:
result_tuples.append(result_tuple)
                if len(result_tuples) > 0:
                    result_dict.setdefault(seq, []).extend(result_tuples)
for k in result_dict.keys():
tmp = result_dict[k]
result_dict[k] = list(set(tmp))
return result_dict
def get_score_and_alignment(self, target_seq, target_start, curr_score, query_start_index, X, query=None, pssm=None):
result = []
high_score = curr_score
q_i = query_start_index
t_i = target_start
if query is not None:
query_int = [AA_TO_INT[s] for s in query]
target_seq_int = [AA_TO_INT[s] for s in target_seq]
# expand to the right..
i = 2
high_i = i
while True:
try:
i += 1
curr_score = curr_score + self.mat[query_int[q_i+i], target_seq_int[t_i+i]]
if curr_score > high_score:
high_score = curr_score
high_i = i
if curr_score <= high_score-X:
# curr_score -= self.mat[query_int[q_i+i], target_seq_int[t_i+i]]
break
except:
break
i = high_i
curr_score = high_score
j = -1
high_j = 0
while True:
try:
if q_i+j<0 or t_i+j<0:
raise Exception
curr_score = curr_score + self.mat[query_int[q_i+j], target_seq_int[t_i+j]]
if curr_score > high_score:
high_score = curr_score
high_j = j
if curr_score <= high_score-X:
# curr_score -= self.mat[query_int[q_i+j], target_seq_int[t_i+j]]
break
except:
break
j -= 1
j = high_j
return (q_i+j, t_i+j, i-j+1, high_score)
        if pssm is not None:
            target_seq_int = [AA_TO_INT[s] for s in target_seq]
            # expand to the right; score each extra column with the PSSM row
            i = 2
            high_i = i
            while True:
                try:
                    i += 1
                    curr_score = curr_score + pssm[q_i + i][target_seq_int[t_i + i]]
                    if curr_score > high_score:
                        high_score = curr_score
                        high_i = i
                    if curr_score <= high_score - X:
                        break
                except IndexError:
                    break
            i = high_i
            curr_score = high_score
            # expand to the left
            j = -1
            high_j = 0
            while True:
                try:
                    if q_i + j < 0 or t_i + j < 0:
                        break
                    curr_score = curr_score + pssm[q_i + j][target_seq_int[t_i + j]]
                    if curr_score > high_score:
                        high_score = curr_score
                        high_j = j
                    if curr_score <= high_score - X:
                        break
                except IndexError:
                    break
                j -= 1
            j = high_j
            return (q_i + j, t_i + j, i - j + 1, high_score)
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # Two-hit search is not implemented in this version; return a
        # placeholder result in the expected format.
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
def json_data():
test_json = 'blast_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
return json_data
if __name__ == '__main__':
db_debug = False
json_data = json_data()
db = BlastDb()
sequences = json_data['db_sequences']
stats = json_data['db_stats']
print(stats)
for s in sequences:
db.add_sequence(s)
if db_debug:
db.get_db_stats()
db_debug2 = False
sub_matrix = np.array(json_data['sub_matrix'], dtype=np.int64)
query_seq = json_data['query_seq']
query_pssm = np.array(json_data['query_pssm'], dtype=np.int64)
test = Blast(sub_matrix)
if db_debug2:
test.get_words(pssm=query_pssm, T=11)
hit_result = json_data['blast_hsp_one_hit_1']
res = test.search_one_hit(db, query=query_seq, T=13, X=5, S=30)
print("wtf")
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import math
code_dict = {
"ATG": "M", #start
"TGA": "Z", "TAA": "Z", "TAG": "Z", #stop
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"TGT": "C", "TGC": "C",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CAT": "H", "CAC": "H",
"ATT": "I", "ATC": "I", "ATA": "I",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"AAA": "K", "AAG": "K",
"TTA": "L", "TTG": "L", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"TGG": "W",
"GAT": "D", "GAC": "D",
"GAA": "E", "GAG": "E",
"TTT": "F", "TTC": "F",
"AAT": "N", "AAC": "N",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"TAT": "Y", "TAC": "Y",
}
def complement(s):
s_out = ""
for i in range(0, len(s)):
if s[i] == "T":
s_out += "A"
elif s[i] == "A":
s_out += "T"
elif s[i] == "C":
s_out += "G"
elif s[i] == "G":
s_out += "C"
return s_out
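# Example: complement("ATGC") == "TACG"; get_orfs below additionally reverses
# the result to obtain the reverse-complement strand.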
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i + 3] for i in range(0, len(orf), 3)]
aa = ''.join(code_dict[c] for c in codons)
return aa
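# Example: codons_to_aa("ATGGCTTGA") == "MAZ" -- start-Met, Ala, and the
# internal stop marker 'Z' defined in code_dict; inputs whose length is not
# a multiple of three return None.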
def get_orfs(genome):
genome = genome.upper()
for i in genome:
if i != 'A' and i != 'C' and i != 'G' and i != 'T':
raise TypeError('Given sequence is not a DNA sequence!')
comp_genome = complement(genome)
comp_genome = comp_genome[::-1]
sequences = [genome, comp_genome]
orfs = list()
for i, frame in enumerate(sequences):
flag = i == 1
for f_index in range(0, 3):
orfs_frames = list()
circular_seq = frame + frame + frame
temp_frame = circular_seq[f_index: f_index + ((len(frame) * 2 - f_index) // 3) * 3]
codon = codons_to_aa(temp_frame)
for start in range(0, math.ceil(len(frame) / 3)):
if codon[start] == 'M':
offset = 0
while offset < math.ceil(len(frame) / 3):
stop = start + offset + 1
if codon[stop] == 'Z':
if offset > 33:
orf = codon[start:stop]
if not flag:
dna_start = f_index + start * 3
dna_stop = (f_index + stop * 3 + 2) % len(frame)
else:
dna_start = len(frame) - (f_index + start * 3) - 1
dna_stop = len(frame) - ((f_index + stop * 3 + 2) % len(frame)) - 1
orfs_frames.append((dna_start, dna_stop, orf, flag))
break
offset += 1
sample_indices_to_remove = set()
for i in range(len(orfs_frames)):
max_length = len(orfs_frames[i][2])
stop_value = orfs_frames[i][1]
for j in range(i + 1, len(orfs_frames)):
if orfs_frames[j][1] != stop_value:
continue
if len(orfs_frames[j][2]) > max_length:
max_length = len(orfs_frames[j][2])
sample_indices_to_remove.add(i)
else:
sample_indices_to_remove.add(j)
sample_indices_to_remove_sorted = sorted(sample_indices_to_remove, key=int, reverse=True)
for index in sample_indices_to_remove_sorted:
del orfs_frames[index]
orfs.extend(orfs_frames)
get_idx_to_remove = set()
for i in range(len(orfs)):
max_length = len(orfs[i][2])
stop_value = orfs[i][1]
flag = orfs[i][3]
for j in range(i + 1, len(orfs)):
if orfs[j][1] != stop_value or orfs[j][3] != flag:
continue
if len(orfs[j][2]) > max_length:
max_length = len(orfs[j][2])
get_idx_to_remove.add(i)
else:
get_idx_to_remove.add(j)
get_idx_to_remove_sorted = sorted(get_idx_to_remove, key=int, reverse=True)
for idx in get_idx_to_remove_sorted:
del orfs[idx]
return orfs
<file_sep>import numpy as np
import itertools
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.word2seqs = {}
self.seqs = []
for word in self.get_all_possible_words():
self.word2seqs[word] = []
@staticmethod
def get_all_possible_words():
return list(map(''.join, itertools.product(ALPHABET, repeat=3)))
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
seq_id = len(self.seqs)
self.seqs.append(sequence)
        # update word2seqs
words = self.get_words_from_sequence(sequence)
for word in words:
self.word2seqs[word].append(seq_id)
@staticmethod
def get_words_from_sequence(sequence):
words = set()
for i in range(len(sequence) - 2):
word = sequence[i:i + 3]
words.add(word)
return words
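    # Example: get_words_from_sequence("MKVLA") -> {"MKV", "KVL", "VLA"}
    # (every overlapping 3-mer; the set collapses duplicates).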
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
seq_ids = self.word2seqs[word]
return list(map(lambda id: self.seqs[id], seq_ids))
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_seqs = len(self.seqs)
num_words = 0
avg_seqs_per_word = 0
for seq_ids in self.word2seqs.values():
num_seqs_with_word = len(seq_ids)
if num_seqs_with_word > 0: # a word is included in some sequence
num_words += 1
avg_seqs_per_word += num_seqs_with_word # word is used by n sequences
avg_seqs_per_word = round(avg_seqs_per_word / num_words)
avg_words_per_seq = 0
for seq in self.seqs:
words = self.get_words_from_sequence(seq)
avg_words_per_seq += len(words)
avg_words_per_seq = round(avg_words_per_seq / num_seqs)
return num_seqs, num_words, avg_words_per_seq, avg_seqs_per_word
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
if sequence is None:
sequence = pssm
words = set()
# match every possible word at every possible position
for word in BlastDb.get_all_possible_words():
for i in range(len(sequence) - 2):
seq_word = sequence[i:i + 3]
score = self.word_alignment_score(word, seq_word)
if score >= T:
words.add(word)
        return list(words)
def word_alignment_score(self, word, word_or_pssm):
score = 0
for i in range(len(word)):
aa1 = AA_TO_INT[word[i]]
if isinstance(word_or_pssm, str):
aa2 = AA_TO_INT[word_or_pssm[i]]
score += self.sub_matrix[aa1][aa2]
else:
score += word_or_pssm[i][aa1]
return score
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
target2hsps = dict() # maps targets to SETS of hsps
if query is None:
query = pssm
for query_pos in range(len(query) - 2):
# for every position of the query, find words that pass the threshold T
query_word = query[query_pos:query_pos + 3]
words = self.get_words(sequence=query_word, T=T) # will return words only for current position
for word in words:
# look up target sequences in the DB that match given word
targets = blast_db.get_sequences(word)
for target in targets:
# the word may occur multiple times in the target -> find all start indices
target_starts = self.find_starts(target, word)
for target_start in target_starts:
# extend aligned words to hsp
hsp = self.extend_to_hsp(target, target_start, query, query_pos, X, S)
if hsp is not None:
# add hsp
if target not in target2hsps:
target2hsps[target] = set()
target2hsps[target].add(hsp)
# convert sets to lists
for target in target2hsps.keys():
hsps = target2hsps[target]
target2hsps[target] = list(hsps)
return target2hsps
def extend_to_hsp(self, target: str, target_start, query, query_start, X, S):
"""
Extend aligned words to HSP
HSP tuple format: (start in query, start in target, length, score)
:param target: target sequence (string of aa)
:param target_start: start index in the target sequence
:param query: query sequence or query PSSM
:param query_start: start index in the query sequence/PSSM
:param X: drop-off threshold X during extension
:param S: score threshold S for the HSP
:return: best HSP tuple after extension or None if conditions are violated
"""
length = 3
best_hsp = (-1, -1, -1, -1)
# extend length (extend to the right)
while True:
best_score = best_hsp[3]
target_word = target[target_start: target_start + length]
query_word = query[query_start: query_start + length]
score = self.word_alignment_score(target_word, query_word)
if score <= best_score - X:
break # stop extension
if best_score < score:
best_hsp = (query_start, target_start, length, score)
length += 1
if query_start + length > len(query) or target_start + length > len(target):
break # end reached
# extend to the left
length = best_hsp[2]
while True:
best_score = best_hsp[3]
target_start -= 1
query_start -= 1
if target_start < 0 or query_start < 0:
break # start reached
length += 1
target_word = target[target_start: target_start + length]
query_word = query[query_start: query_start + length]
score = self.word_alignment_score(target_word, query_word)
if score <= best_score - X:
break # stop extension
if best_score < score:
best_hsp = (query_start, target_start, length, score)
# extension complete
best_score = best_hsp[3]
if best_score < S:
return None
else:
return best_hsp
@staticmethod
def find_starts(sequence: str, word):
starts = []
index = 0
while True:
index = sequence.find(word, index)
if index != -1:
starts.append(index) # found next position
else:
return starts
index += 1
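    # Example: find_starts("ABCABCA", "ABC") -> [0, 3]; overlapping matches
    # are found as well, e.g. find_starts("AAAA", "AAA") -> [0, 1].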
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
if query is None:
query = pssm
# a hit is a tuple (target_pos, query_pos)
# diagonal is defined as: target_pos - query_pos
hits = dict() # maps targets to hits (target_pos, query_pos)
# find all hits for every target in DB
for query_pos in range(len(query) - 2):
# for every position of the query, find words that pass the threshold T when aligned with query
query_word = query[query_pos:query_pos + 3]
words = self.get_words(sequence=query_word, T=T) # will return words only for current position
for word in words:
# look up target sequences in the DB that contain given word
targets = blast_db.get_sequences(word)
for target in targets:
# the word may occur multiple times in the target -> find all start indices
occurrences = self.find_starts(target, word)
for target_pos in occurrences:
hit = (target_pos, query_pos)
# save hit
if target not in hits:
hits[target] = []
hits[target].append(hit)
# given the hits, find hsps using two-hit method
target2hsps = dict() # maps targets to lists of hsps
for target, target_hits in hits.items():
# sort hits into diagonals
diagonals = dict()
for hit in target_hits:
target_pos, query_pos = hit
diagonal = target_pos - query_pos
if diagonal not in diagonals:
diagonals[diagonal] = []
diagonals[diagonal].append(hit)
            hsps = self.find_hsps_from_diagonals(query, target, diagonals, X, S, A)
if len(hsps) > 0:
target2hsps[target] = hsps
return target2hsps
    def find_hsps_from_diagonals(self, query, target, diagonals, X, S, A):
hsps = []
for diagonal_hits in diagonals.values():
diagonal_hsps = []
for left in range(len(diagonal_hits) - 1):
left_hit = diagonal_hits[left]
if self.hit_hsps_overlap(left_hit, diagonal_hsps):
continue
# find non overlapping right hit
right_hit = None
for right in range(left+1, len(diagonal_hits)):
hit = diagonal_hits[right]
if hit[1] < left_hit[1] + 3:
continue # R < L or overlap between hits
else:
right_hit = hit # right hit has no overlap
break
if right_hit is None:
continue # no right hit
if right_hit[1] > left_hit[1] + A:
continue # right hit is too far
# left_hit and right_hit are HSP-candidates -> evaluate and update diagonal_hsps
hsp = self.extend_two_hits_to_hsp(query, target, left_hit, right_hit, X, S)
if hsp is not None:
diagonal_hsps.append(hsp)
# filter hsps by score S
diagonal_hsps = list(filter(lambda x: x[3] >= S, diagonal_hsps))
hsps.extend(diagonal_hsps)
return hsps
def extend_two_hits_to_hsp(self, query, target, left_hit, right_hit, X, S):
# hit: (target_pos, query_pos)
# HSP: (query_pos, target_pos, length, score)
# init at right hit
length = 3
target_start, query_start = right_hit
target_word = target[target_start: target_start + length]
query_word = query[query_start: query_start + length]
score = self.word_alignment_score(target_word, query_word)
best_hsp = (query_start, target_start, length, score)
# extend right_hit to the left first
while target_start > 0 and query_start > 0:
# step to the left
best_score = best_hsp[3]
target_start -= 1
query_start -= 1
length += 1
target_word = target[target_start: target_start + length]
query_word = query[query_start: query_start + length]
score = self.word_alignment_score(target_word, query_word)
if score <= best_score - X:
break # stop extension
if best_score < score:
best_hsp = (query_start, target_start, length, score)
query_start, target_start, length, score = best_hsp
# if the extension reaches end of left_hit they form a HSP
if query_start > left_hit[1] + 3:
return None # left hit not reached, no HSP formed
# try extend to the right (extend length)
while True:
length += 1
if query_start + length > len(query) or target_start + length > len(target):
break # end reached
best_score = best_hsp[3]
target_word = target[target_start: target_start + length]
query_word = query[query_start: query_start + length]
score = self.word_alignment_score(target_word, query_word)
if score <= best_score - X:
break # stop extension
if best_score < score:
best_hsp = (query_start, target_start, length, score)
return best_hsp # no filtering by S here, this will be done later
def hit_hsps_overlap(self, hit, hsps):
# hit: (target_pos, query_pos)
# HSP: (query_pos, target_pos, length, score)
for hsp in hsps:
hsp_query_pos = hsp[0]
hsp_length = hsp[2]
hit_query_pos = hit[1]
hit_length = 3
# overlap if there is no separating axis: NOT (hit_right < hsp_left OR hsp_right < hit_left)
if not (hit_query_pos + hit_length < hsp_query_pos or hsp_query_pos + hsp_length < hit_query_pos):
return True # overlap
return False
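# Sketch of the intended two-hit call (assumes a populated BlastDb `db` and
# a 20x20 substitution matrix `sub`, e.g. loaded from the test JSON):
#   blast = Blast(sub)
#   hsps = blast.search_two_hit(db, query=query_seq, T=11, X=5, S=30, A=40)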
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    # R (arginine), H (histidine) and K (lysine) are positively charged.
    return aa in 'RHK'
def isNegativelyCharged(aa):
    # D (aspartate) and E (glutamate) are negatively charged.
    return aa in 'DE'
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.__headers = []
self.read_fasta(filepath)
def add_sequence(self,sequence):
if sequence.endswith('*'):
sequence= sequence[:-1]
self.__sequences.append(sequence)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
num_seq =len(self.__sequences)
if(num_seq == 0):
return 0
overall_len = 0
for seq in self.__sequences:
overall_len += len(seq)
return overall_len/num_seq
def read_fasta(self, path):
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.add_sequence(seq)
seq = ''
sequence_started = True
self.__headers.append(line.strip())
else:
seq += line.strip()
self.add_sequence(seq)
def get_abs_frequencies(self):
# frequencies
if len(self.__sequences) > 0:
all_seq = ''.join(self.__sequences)
return dict(Counter(all_seq))
else:
return None
def get_av_frequencies(self):
# avg frequencies
if len(self.__sequences) > 0:
abs_freq = self.get_abs_frequencies()
return {k: v/len(''.join(self.__sequences)) for k, v in abs_freq.items()}
else:
return None
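# Usage sketch (hypothetical FASTA path, for illustration only):
#   dist = AADist('tests/sample.fasta')
#   dist.get_counts(); dist.get_average_length(); dist.get_av_frequencies()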
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
if aa in 'KRH':
return True
return False
def isNegativelyCharged(aa):
if aa in 'DE':
return True
return False
def isHydrophobic(aa):
if aa in 'VILFWYMA':
return True
return False
def isAromatic(aa):
if aa in 'FWYH':
return True
return False
def isPolar(aa):
if aa in 'RNDQEHKSTY':
return True
return False
def isProline(aa):
if aa == 'P':
return True
return False
def containsSulfur(aa):
if aa == 'C' or aa == 'M':
return True
return False
def isAcid(aa):
if aa == 'D' or aa == 'E':
return True
return False
def isBasic(aa):
if aa in 'RHK':
return True
return False<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
if aa == 'R' or aa == 'H' or aa == 'K':
return True
return False
def isNegativelyCharged(aa):
if aa == 'D' or aa == 'E':
return True
return False
def isHydrophobic(aa):
if aa == 'A' or aa == 'V' or aa == 'I' or aa == 'L' or aa == 'M' or aa == 'F' or aa == 'Y' or aa == 'W':
return True
return False
def isAromatic(aa):
if aa == 'F' or aa == 'W' or aa == 'Y' or aa == 'H':
return True
return False
def isPolar(aa):
if aa == 'R' or aa == 'N' or aa == 'D' or aa == 'E' or aa == 'Q' or aa == 'H' or aa == 'K' or aa == 'S' or aa == 'T' or aa == 'Y':
return True
return False
def isProline(aa):
if aa == 'P':
return True
return False
def containsSulfur(aa):
if aa == 'C' or aa == 'M':
return True
return False
def isAcid(aa):
if aa == 'D' or aa == 'E':
return True
return False
def isBasic(aa):
if aa == 'R' or aa == 'H' or aa == 'K':
return True
return False
<file_sep>import numpy as np
from collections import Counter
import json
from pathlib import Path
def json_data():
test_json = './tests/pssm_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
return json_data
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences, log=False):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) < 1:
print("ERROR: MSA does not conatain a sequence!: {}".format(sequences))
raise TypeError
self.n = len(sequences)
self.m = len(sequences[0])
print("Input MSA with {} sequences of (gapped) length {}".format(self.n,self.m))
for seq in sequences:
if len(seq) != self.m:
print("ERROR: Sequence does not have the same length!: {}".format(seq))
raise TypeError
check_sequence = str(seq)
for aa_opt in ALPHABET:
check_sequence = check_sequence.replace(aa_opt, '')
if check_sequence != '':
print("ERROR: Sequence conatains illegal characters!: seq {} becomes {}".format(seq, check_sequence))
raise TypeError
self.sequences = sequences
self.msa_columns = [''.join([seq[i] for seq in self.sequences]) for i in range(0,self.m) ]
# Unweighted Counts
self.seq_counters = []
self.pssm_counts = np.zeros((self.m, len(ALPHABET)),dtype=np.int64)
for idx_msa_col, msa_col in enumerate(self.msa_columns):
msa_col_count = Counter(msa_col)
self.seq_counters.append(msa_col_count)
for idx_aa, aa in enumerate(ALPHABET):
self.pssm_counts[idx_msa_col,idx_aa] = msa_col_count[aa]
if log:
#np.set_printoptions(threshold=np.inf)
print("pssm_counts: \n{}".format(self.pssm_counts[:7,])) #
#np.set_printoptions(threshold=1000)
# Sequence weights
# w_i,k = 1 / ( r_i * s_i,k )
self.sequence_weight_matrix = np.zeros((self.m,self.n),dtype=np.float64)
self.number_unique_aa_in_msa_col = np.zeros(self.m)
for idx_msa_col, msa_col in enumerate(self.msa_columns):
msa_column_counter = self.seq_counters[idx_msa_col]
self.number_unique_aa_in_msa_col[idx_msa_col] = len(msa_column_counter.keys())
for idx_seq, seq in enumerate(self.sequences):
aa_at_sequence_col = seq[idx_msa_col]
self.sequence_weight_matrix[idx_msa_col,idx_seq] = 1.0 / (self.number_unique_aa_in_msa_col[idx_msa_col] * msa_column_counter[aa_at_sequence_col])
self.sequence_weights = np.sum(self.sequence_weight_matrix[self.number_unique_aa_in_msa_col > 1], axis=0)
if log:
print("sequence_weights of len {} for {} sequences: {}".format(len(self.sequence_weights), self.n, self.sequence_weights))
self.num_independent_observation = np.mean(self.number_unique_aa_in_msa_col)
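        # Worked example of the weight formula w_i,k = 1 / (r_i * s_i,k):
        # in a column "AAC" there are r_i = 2 distinct residues, so each 'A'
        # sequence gets 1/(2*2) = 0.25 and the 'C' sequence gets 1/(2*1) = 0.5.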
# Weighted Counts (by self.sequence_weights)
self.pssm_counts_weighted = np.zeros((self.m, len(ALPHABET)),dtype=np.float64)
for idx_msa_col, msa_col in enumerate(self.msa_columns):
for idx_seq, seq in enumerate(self.sequences):
aa_residue = msa_col[idx_seq] # seq[idx_msa_col] == msa_col[idx_seq]
self.pssm_counts_weighted[idx_msa_col, AA_TO_INT[aa_residue]] += self.sequence_weights[idx_seq]
if log:
#np.set_printoptions(threshold=np.inf)
print("pssm_counts_weighted: \n{}".format(self.pssm_counts_weighted[:7,])) #
#np.set_printoptions(threshold=1000)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if bg_matrix is not None:
# Initialize background frequencies bg_vector
bg_vector = np.sum(bg_matrix, axis=1)
pssm_counts = self.pssm_counts
if use_sequence_weights:
# w_i,k = 1 / ( r_i * s_i,k )
pssm_counts = self.pssm_counts_weighted
if redistribute_gaps:
gap_counts = pssm_counts[:,GAP_INDEX][:,np.newaxis]
if bg_matrix is None:
# Redistribute gaps with uniform background frequencies (1/20)
pssm_counts = np.add(pssm_counts, gap_counts * 0.05)
else:
# Redistribute gaps with provided background frequencies
bg_gap_matrix = np.multiply(bg_vector, gap_counts) # row x column multiplication -> matrix
pssm_counts = np.add(pssm_counts[:,:20],bg_gap_matrix)
if add_pseudocounts:
pseudocounts = np.zeros((self.m,20),dtype=np.float64)
for idx_aa, aa in enumerate(ALPHABET[:-1]):
for idx_msa_col, msa_col in enumerate(self.msa_columns):
#g_i,a = sum_j( (f_i,j / Pj) * qj;a )
aa_pseudocount_sum = 0
for idx_cur_aa, cur_aa in enumerate(ALPHABET[:-1]):
if bg_matrix is None:
aa_pseudocount_sum += (pssm_counts[idx_msa_col,idx_cur_aa] / 0.05) * 0.0025 # uniform background and substitution frequencies
else:
aa_pseudocount_sum += (pssm_counts[idx_msa_col,idx_cur_aa] / bg_vector[idx_cur_aa]) * bg_matrix[idx_aa][idx_cur_aa]
pseudocounts[idx_msa_col,idx_aa] = aa_pseudocount_sum
alpha = (self.num_independent_observation - 1)
pssm_counts = np.divide( alpha * pssm_counts[:,:20] + beta * pseudocounts, alpha + beta)
pssm_primary_gapless = pssm_counts[ np.array(list(self.sequences[0])) != '-' ][:,:20]
msa_col_sum_gapless = np.sum(pssm_primary_gapless, axis=1)
pssm_rel = np.divide(pssm_primary_gapless,msa_col_sum_gapless[:,np.newaxis])
if bg_matrix is None:
# Use uniform background frequencies (1/20)
pssm_rel_background = np.divide(pssm_rel,0.05)
else:
# Use provided background frequencies
pssm_rel_background = np.divide(pssm_rel,bg_vector)
# S_i,j = 2 * log_2 ( f_i,j / P_j )
pssm_rel_background_2log2 = np.multiply(2, np.log2(pssm_rel_background) )
pssm = pssm_rel_background_2log2
pssm[np.isneginf(pssm)] = -20
pssm = np.round(pssm, 0).astype(np.int64)
print("Result PSSM of shape {}".format(pssm.shape))
return pssm
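    # Worked example of the scoring formula S_i,j = 2 * log_2(f_i,j / P_j):
    # with uniform background P_j = 0.05, an observed relative frequency
    # f_i,j = 0.2 gives 2 * log2(0.2 / 0.05) = 2 * log2(4) = 4.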
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.n, self.m)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.sequence_weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.num_independent_observation
def main():
print('MSA class.')
msa_sequences = [
"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---"
]
'''
msa_sequences = [
"MVATGLFVGLNKGHVVTKREQPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK",
"QVKTGLFVGLNKGHVVTRRELAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK",
"AVKTGLFVGLNKGHVVTRRELAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKK"
]
'''
data = json_data()
msa = MSA(data['msa_sequences'], log=True)
bg_matrix = data['bg_matrix']
sequence_weights_expected = data['sequence_weights']
sequence_weights = msa.get_sequence_weights()
print("Sequence weight comparison: {}".format(np.sum(sequence_weights - sequence_weights_expected)))
result = msa.get_pssm(bg_matrix=bg_matrix,
redistribute_gaps=True,
use_sequence_weights=True,
add_pseudocounts=True)
np.set_printoptions(threshold=np.inf)
print("Result:")
print(result[:7,])
'''
for p in pssm_error:
try:
msa = MSA(p, log=True)
print(msa.get_size())
print(msa.get_primary_sequence())
print("****************************** Failed to raise error for invalid MSA! ******************************")
except:
print("****************************** Raise error OK! ******************************")
'''
    # (A long got-vs-expected PSSM debug dump was pasted here; the reference
    # data is loaded from tests/pssm_test.json above.)
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from tests import matrices
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.stringM = string1
self.stringN = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(self.stringN) + 1, len(self.stringM) + 1), dtype=np.int)
self.traceback = {}
self.paths = None
self.score = None
self.align()
def get_paths(self, n, m):
if n == 0 and m == 0: # self.traceback.get((n_start, m), None) is None:
# end of path
return [("", "")]
paths = []
#score = self.score_matrix[n,m] # prepended with 0
for n_pred,m_pred in self.traceback[(n, m)]:
for seq_trail_m, seq_trail_n in self.get_paths(n_pred, m_pred):
if n_pred == n-1 and m_pred == m-1:
# n index for the string with one additional element in front
paths.append((seq_trail_m + self.stringM[m-1], seq_trail_n + self.stringN[n-1]))
elif n_pred == n and m_pred == m-1:
# n index for the string with one additional element in front
paths.append((seq_trail_m + self.stringM[m-1], seq_trail_n + "-"))
elif n_pred == n-1 and m_pred == m:
# n index for the string with one additional element in front
paths.append((seq_trail_m + "-", seq_trail_n + self.stringN[n-1]))
else:
print("ERROR! Did not find correct neighbor for n_pred {} n {} m_pred {} m {}".format(n_pred, n, m_pred, m))
return paths
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
#Sm,n = max {
# Sm-1,n-1 + d(m, n)
# Sm-1,n + Gap
# Sm,n-1 + Gap
# }
M = len(self.stringM)
N = len(self.stringN)
for n in range(0,N+1):
self.score_matrix[n,0] = n * self.gap_penalty
self.traceback[(n,0)] = [(n-1,0)]
for m in range(1,M+1):
self.score_matrix[0,m] = m * self.gap_penalty
self.traceback[(0,m)] = [(0,m-1)]
# Fill
for n in range(1,N+1):
for m in range(1,M+1):
                # A traceback arrow is stored for every neighbor cell that
                # attains the maximum below, cf.
                # http://webclu.bio.wzw.tum.de/binfo/edu/tutorials/pairalign/glob_ali.html
                predecessor_opts = [
                    (n-1, m-1, self.score_matrix[n-1, m-1] + self.substitution_matrix[self.stringN[n-1]][self.stringM[m-1]]),
(n,m-1, self.score_matrix[n,m-1] + self.gap_penalty),
(n-1,m, self.score_matrix[n-1,m] + self.gap_penalty)
]
max_val = max([val for _,_,val in predecessor_opts])
self.score_matrix[n,m] = max_val
pred_list = []
self.traceback[(n,m)] = pred_list
for n_pred,m_pred,val in [(n_pred,m_pred,val) for n_pred,m_pred,val in predecessor_opts if val == max_val]:
# add to traceback
pred_list.append((n_pred,m_pred))
print("self.score_matrix \n{}".format(self.score_matrix))
# Traceback
        # Traceback: to enumerate all possible global alignments, every path
        # reachable via the arrow markers is followed from the bottom-right
        # cell to the top-left and recorded. The alignment is read off (top
        # left to bottom right) from the characters of both sequences in the
        # cells a traceback path touches; whenever a sequence position is
        # visited more than once, a gap (-) is written instead.
self.score = self.score_matrix[N,M]
self.paths = self.get_paths(N,M)
print("Paths: {}".format(self.paths))
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.paths)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.paths
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
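    # Recurrence check by hand: if S[n-1,m-1] = -4, S[n-1,m] = S[n,m-1] = -10,
    # the substitution score d(m, n) = 5 and the gap penalty is -6, then
    # S[n,m] = max(-4 + 5, -10 - 6, -10 - 6) = 1, reached via the diagonal, so
    # only the diagonal predecessor receives a traceback arrow.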
def main():
    print('Global alignment demo.')
alignment = GlobalAlignment("SCYTHE", "SCTHE", -6, matrices.MATRICES['blosum'])
print(alignment.get_best_score())
return None
if __name__ == '__main__':
main()
# (Debug dump comparing produced and expected score matrices/alignments
# removed.)
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
self.score_matrix = np.zeros(
(len(string2) + 1, len(string1) + 1), dtype=np.int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for y in range(0, len(self.string1)+1):
self.score_matrix[0, y] = self.gap_penalty*y # first row: leading gaps in string2
for x in range(0, len(self.string2)+1):
self.score_matrix[x, 0] = self.gap_penalty*x # first column: leading gaps in string1
self.score_matrix[0, 0] = 0
for x in range(1, len(self.string2)+1): # rows: positions in string2
for y in range(1, len(self.string1)+1): # columns: positions in string1
diagonal = self.score_matrix[x-1, y-1] + \
self.substitution_matrix[self.string2[x-1]][self.string1[y-1]]
horizontal = self.score_matrix[x, y-1] + self.gap_penalty
vertical = self.score_matrix[x-1, y] + self.gap_penalty
self.score_matrix[x, y] = max(diagonal, horizontal, vertical)
return self.score_matrix
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2), len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
alignments = []
self.get_alignments_recursive(alignments, '', '', len(self.string2), len(self.string1))
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
def get_alignments_recursive(self, alignments, aligned1, aligned2, x, y):
# Walk back from cell (x, y) to (0, 0), rebuilding both aligned strings
# along every optimal path; gaps go into whichever string is not consumed.
if x == 0 and y == 0:
alignments.append((aligned1, aligned2))
return alignments
currentscore = self.score_matrix[x, y]
if x > 0 and y > 0 and currentscore == self.score_matrix[x-1, y-1] + self.substitution_matrix[self.string2[x-1]][self.string1[y-1]]:
self.get_alignments_recursive(alignments, self.string1[y-1] + aligned1, self.string2[x-1] + aligned2, x-1, y-1)
if y > 0 and currentscore == self.score_matrix[x, y-1] + self.gap_penalty:
self.get_alignments_recursive(alignments, self.string1[y-1] + aligned1, '-' + aligned2, x, y-1)
if x > 0 and currentscore == self.score_matrix[x-1, y] + self.gap_penalty:
self.get_alignments_recursive(alignments, '-' + aligned1, self.string2[x-1] + aligned2, x-1, y)
return alignments
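
# --- Usage sketch (illustrative only) ---
# Minimal demo of the class above. The toy substitution dict (match +1,
# mismatch -1 over a small alphabet) is an assumption standing in for the
# BLOSUM-style dict supplied by the exercise tests.
if __name__ == '__main__':
    toy_matrix = {a: {b: (1 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    demo = GlobalAlignment('ACGT', 'AGT', -2, toy_matrix)
    print(demo.get_best_score())   # 1 under the toy scoring
    print(demo.get_alignments())   # [('ACGT', 'A-GT')]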
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def read_text(self):
with open('temp_fasta.txt') as f:
self.__sequences = f.read().splitlines()
def read_fasta(self, path):
fasta = list()
test = list()
head = list()
with open(path) as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith(">"):
curr_seq = line[1:]
head.append(curr_seq)
if curr_seq not in fasta:
test.append(''.join(fasta))
fasta = []
continue
fasta.append(line)
if fasta:
test.append(''.join(fasta))
test = test[1:]
refactor_keys = str.maketrans('', '', '*')
out_list = [s.translate(refactor_keys) for s in test]
file = open("temp_fasta.txt", "w")
for item in out_list:
file.write("%s\n" % item)
file.close()
self.read_text()
return head, out_list
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
total = 0
count = self.get_counts()
for l in self.__sequences:
total = total + len(l)
return total / count
def aa_dist(self, aa_seq):
counted = Counter(aa_seq)
for k in counted:
counted[k] /= len(aa_seq)
return counted
def get_abs_frequencies(self):
# return number of occurrences, not normalized by length
summary = list()
for line in self.__sequences:
for char in line:
summary.append(char)
return dict((x, summary.count(x)) for x in set(summary))
def get_av_frequencies(self):
# return number of occurrences normalized by length
abs_freq = self.get_abs_frequencies()
total = sum(abs_freq.values())
av_freq = dict()
for k, v in abs_freq.items():
av_freq[k] = v / total
return av_freq
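
# --- Usage sketch (illustrative only) ---
# 'example.fasta' is a hypothetical path; any protein FASTA file works.
# Note that read_fasta() writes a temp_fasta.txt working file into the
# current directory as a side effect.
if __name__ == '__main__':
    dist = AADist('example.fasta')
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_av_frequencies())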
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
if len(sequences) == 0:
raise TypeError('ERROR')
for seq in sequences:
if len(seq) != len(sequences[0]):
raise TypeError('ERROR')
if any(c not in ALPHABET for seq in sequences for c in seq):
raise TypeError('ERROR')
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if bg_matrix is not None:
bg_array = self.get_background_frequency(bg_matrix)
primary_length = len(self.get_primary_sequence())
primary_seq_with_gaps = self.sequences[0]
non_empty_index = []
for i in range(len(primary_seq_with_gaps)):
if primary_seq_with_gaps[i] != '-':
non_empty_index.append(i)
pssm = np.zeros([primary_length, len(ALPHABET)-1])
gaps = np.zeros([primary_length])
weights = self.get_sequence_weights()
# checking gaps and weights
for seq_index, seq in enumerate(self.sequences):
for i in range(len(self.sequences[0])):
if i in non_empty_index:
if seq[i] != '-':
if use_sequence_weights:
pssm[non_empty_index.index(i)][AA_TO_INT[seq[i]]] += weights[seq_index]
else:
pssm[non_empty_index.index(i)][AA_TO_INT[seq[i]]] += 1
elif seq[i] == '-' and redistribute_gaps:
if use_sequence_weights:
gaps[non_empty_index.index(i)] += weights[seq_index]
else:
gaps[non_empty_index.index(i)] += 1
if redistribute_gaps:
for i in range(len(pssm)):
if(gaps[i] != 0):
for j in range(len(pssm[i])):
if bg_matrix is not None:
pssm[i][j] += bg_array[j] * gaps[i]
else:
pssm[i][j] += 0.05 * gaps[i]
#adding pseudocounts
if add_pseudocounts:
L = len(self.get_primary_sequence())
g = np.zeros((L, 20))
if bg_matrix is not None:
q = bg_matrix
P = self.get_background_frequency(bg_matrix)
for i in range(L):
for a in range(20):
for j in range(20):
if bg_matrix is not None:
g[i][a] += ((pssm[i][j] * q[a][j]) / P[j])
else:
g[i][a] += (pssm[i][j] * 0.05)
alpha = self.get_number_of_observations() - 1
for i in range(L):
for j in range(20):
pssm[i][j] = ((alpha * pssm[i][j]) + (beta * g[i][j])) / (alpha + beta)
pssm = pssm / np.sum(pssm, axis=1, keepdims = True)
if bg_matrix is not None:
for i in range(len(pssm)):
for j in range(len(pssm[0])):
pssm[i][j] = pssm[i][j] / bg_array[j]
else:
pssm = pssm / 0.05
pssm = np.log2(pssm) * 2
pssm[pssm == float("-inf")] = -20
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-","")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
seq_width = len(self.sequences[0])
seq_count = len(self.sequences)
weights = np.zeros([seq_width, seq_count])
pssm = np.zeros([seq_width, len(ALPHABET)])
for i in range(seq_width):
for seq in self.sequences:
pssm[i][AA_TO_INT[seq[i]]] += 1
for i in range(seq_width):
for seq_index, seq in enumerate(self.sequences):
if(np.count_nonzero(pssm[i]) > 1):
weights[i,seq_index] = 1/(pssm[i][AA_TO_INT[seq[i]]] * np.count_nonzero(pssm[i]))
else:
weights[i,seq_index] = 0
weights = np.sum(weights, axis=0)
return weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
gapped_pssm = np.zeros([len(self.sequences[0]), len(ALPHABET)])
for i in range(len(self.sequences[0])):
for seq in self.sequences:
gapped_pssm[i][AA_TO_INT[seq[i]]] += 1
L = len(self.sequences[0])
obs_count = 0
for i in range(L):
obs_count += np.count_nonzero(gapped_pssm[i])
obs_count = obs_count / L
return obs_count
def get_background_frequency(self, bg_matrix):
background_freq = np.zeros(20)
for i in range(len(bg_matrix)):
background_freq[i] = sum(bg_matrix[i])
return background_freq
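
# --- Usage sketch (illustrative only) ---
# The four toy sequences are an assumption; any equal-length gapped strings
# over the 21-letter alphabet behave the same way.
if __name__ == '__main__':
    msa = MSA(['SE-AN', 'SE-ES', 'SEVEN', 'SE-AS'])
    print(msa.get_size())              # (4, 5)
    print(msa.get_primary_sequence())  # 'SEAN'
    print(msa.get_sequence_weights())
    print(msa.get_pssm())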
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.traceback_matrix = np.ones((len(string2) + 1, len(string1) + 1), dtype=int)
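# traceback_matrix encodes the set of optimal predecessor directions per cell
# as a product of primes: factor 3 = diagonal step (match/mismatch), factor
# 5 = vertical step (gap in string1), factor 7 = horizontal step (gap in
# string2). Divisibility tests in traceback() recover the directions.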
self.alignments = []
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix[0][0] = 0
for i in range(len(self.string1)):
self.score_matrix[0][i+1] = 0
self.traceback_matrix[0][i+1] *= 7
for i in range(len(self.string2)):
self.score_matrix[i+1][0] = 0
self.traceback_matrix[i+1][0] *= 5
for i in range(len(self.string2)): # indices running over strings, add +1 for score and traceback matrices
for j in range(len(self.string1)):
match_score = self.substitution_matrix[self.string2[i]][self.string1[j]]
score_diag = self.score_matrix[i][j] + match_score
score_hgap = self.score_matrix[i][j+1] + self.gap_penalty
score_vgap = self.score_matrix[i+1][j] + self.gap_penalty
best = max(score_diag, score_hgap, score_vgap, 0)
self.score_matrix[i+1][j+1] = best
if score_diag == best:
self.traceback_matrix[i+1][j+1] *= 3
if score_hgap == best:
self.traceback_matrix[i+1][j+1] *= 5
if score_vgap == best:
self.traceback_matrix[i+1][j+1] *= 7
self.best = 0
self.best_pos = (0,0)
for i in range(len(self.string2)):
for j in range(len(self.string1)):
if self.score_matrix[i+1,j+1] > self.best:
self.best = self.score_matrix[i+1][j+1]
self.best_pos = (i+1,j+1)
if self.best != 0:
self.alignments = self.traceback(self.best_pos[0], self.best_pos[1])
def traceback(self, i, j):
if self.score_matrix[i][j] == 0:
return [('','')]
alignments = []
if self.traceback_matrix[i][j] % 3 == 0:
for alignment in self.traceback(i-1, j-1):
alignments.append((alignment[0] + self.string1[j-1], alignment[1] + self.string2[i-1]))
if self.traceback_matrix[i][j] % 5 == 0:
for alignment in self.traceback(i-1, j):
alignments.append((alignment[0] + '-', alignment[1] + self.string2[i-1]))
if self.traceback_matrix[i][j] % 7 == 0:
for alignment in self.traceback(i, j-1):
alignments.append((alignment[0] + self.string1[j-1], alignment[1] + '-'))
return alignments
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return len(self.alignments) > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if self.has_alignment():
return self.alignments[0]
return ('','')
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned,
False otherwise
"""
if not self.has_alignment():
return False
alignment = self.alignments[0]
last = 0
aligned_string = ''
if string_number == 1:
last = self.best_pos[1]
aligned_string = alignment[0]
else:
last = self.best_pos[0]
aligned_string = alignment[1]
aligned_string = aligned_string.replace('-', '')
last = last - 1
first = last - len(aligned_string)
if residue_index >= first and residue_index <= last:
return True
return False
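
# --- Usage sketch (illustrative only) ---
# The toy scoring dict (match +3, mismatch -3) is an assumption in place of
# the BLOSUM dict used by the exercise tests.
if __name__ == '__main__':
    toy_matrix = {a: {b: (3 if a == b else -3) for b in 'ACGT'} for a in 'ACGT'}
    local = LocalAlignment('ACGTAC', 'GTA', -6, toy_matrix)
    print(local.has_alignment())  # True: 'GTA' occurs inside 'ACGTAC'
    print(local.get_alignment())  # ('GTA', 'GTA')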
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequence_list = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequence_list.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
sub_list = []
for sequence in self.sequence_list:
if word in sequence:
sub_list.append(sequence)
return sub_list
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
word_set = set([])
seq_count = 0
for sequence in self.sequence_list:
current_word_set = set([])
for idx in range(len(sequence)-2):
word = sequence[idx:idx+3]
if word not in current_word_set:
seq_count += 1
word_set.add(word)
current_word_set.add(word)
word_count = 0
for sequence in self.sequence_list:
diff_words = set([])
for idx in range(len(sequence)-2):
diff_words.add(sequence[idx:idx+3])
word_count += len(diff_words)
return (len(self.sequence_list), len(word_set), round(word_count / len(self.sequence_list)), round(seq_count / len(word_set)))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
if pssm is not None:
seeds = set([])
for i in range(len(pssm) - 2):
for ind1 in range(len(pssm[i])):
for ind2 in range(len(pssm[i])):
for ind3 in range(len(pssm[i])):
seed = INT_TO_AA[ind1] + INT_TO_AA[ind2] + INT_TO_AA[ind3]
score = (pssm[i][ind1] + pssm[i + 1][ind2] + pssm[i + 2][ind3])
if score >= T:
seeds.add(seed)
return list(seeds)  # docstring promises a list of unique words
else:
seeds = set([])
for k in range(len(sequence)-2):
for ind1 in range(20):
for ind2 in range(20):
for ind3 in range(20):
seed = INT_TO_AA[ind1] + INT_TO_AA[ind2] + INT_TO_AA[ind3]
first = AA_TO_INT[sequence[k]]
second = AA_TO_INT[sequence[k+1]]
third = AA_TO_INT[sequence[k+2]]
score = (self.substitution_matrix[first][ind1] + self.substitution_matrix[second][ind2] + self.substitution_matrix[third][ind3])
if score >= T:
seeds.add(seed)
return list(seeds)  # docstring promises a list of unique words
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d  # placeholder result; the actual search is not implemented here
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d  # placeholder result; the actual search is not implemented here
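
# --- Usage sketch (illustrative only) ---
# Two made-up sequences; get_db_stats() returns (number of sequences,
# distinct words, avg words per sequence, avg sequences per word).
if __name__ == '__main__':
    db = BlastDb()
    db.add_sequence('MGPRARPAFL')
    db.add_sequence('MGPRARPAQQ')
    print(db.get_sequences('GPR'))  # both sequences contain the word 'GPR'
    print(db.get_db_stats())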
<file_sep>import numpy as np
from tests.matrices import MATRICES
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros(
(len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(len(self.string2)+1):
self.score_matrix[i][0] = self.gap_penalty*i
for j in range(len(self.string1)+1):
self.score_matrix[0][j] = self.gap_penalty*j
for i in range(1, len(self.string2)+1):
for j in range(1, len(self.string1)+1):
self.score_matrix[i][j] = max(
self.score_matrix[i-1][j-1] +
self.substitution_matrix[self.string2[i-1]
][self.string1[j-1]],
self.score_matrix[i-1][j]+self.gap_penalty,
self.score_matrix[i][j-1]+self.gap_penalty
)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.find_alignment("", "", len(self.string2), len(self.string1))
def find_alignment(self, alignment1, alignment2, i, j):
if i == 0 and j == 0:
return [(alignment1, alignment2)]
alignments = []
if i > 0 and j > 0 and self.score_matrix[i][j] == self.score_matrix[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
alignments.extend(self.find_alignment(
self.string1[j-1]+alignment1, self.string2[i-1]+alignment2, i-1, j-1))
if i > 0 and self.score_matrix[i][j] == self.score_matrix[i-1][j]+self.gap_penalty:
alignments.extend(self.find_alignment(
'-'+alignment1, self.string2[i-1]+alignment2, i-1, j))
if j > 0 and self.score_matrix[i][j] == self.score_matrix[i][j-1]+self.gap_penalty:
alignments.extend(self.find_alignment(
self.string1[j-1]+alignment1, '-'+alignment2, i, j-1))
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
def main():
align = GlobalAlignment("SCYTHE", "SCTHE", -6, MATRICES["blosum"])
print(align.get_alignments())
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from pathlib import Path
import itertools
import re
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
def find_words(self, word_len=3):
self.unique_words = set() # Unique words appearing in all sequences in DB.
self.uniques_in_sequences = [] # All unique words per sequence.
self.uniques_in_DB = [] # uniques_in_sequences concatenated in one list.
# Searching for patterns is unnecessary. Just extract all substrings of length word_len from all sequences.
"""pattern = re.compile('|'.join(self.words_possible))
for seq in self.sequences:
uniques_in_sequence = set()
uniques_in_sequence.update(pattern.findall(seq, pos=0))
uniques_in_sequence.update(pattern.findall(seq, pos=1))
uniques_in_sequence.update(pattern.findall(seq, pos=2))
self.unique_words.update(uniques_in_sequence)
self.uniques_in_sequences.append(uniques_in_sequence)
self.uniques_in_DB += list(uniques_in_sequence) """
for seq in self.sequences:
uniques_in_sequence = set()
for i in range(len(seq)-(word_len-1)):
uniques_in_sequence.update([seq[i:i+word_len]])
self.unique_words.update(uniques_in_sequence)
self.uniques_in_sequences.append(uniques_in_sequence)
self.uniques_in_DB += list(uniques_in_sequence)
# unique_words.update(pattern.findall(''.join(self.sequences), pos=0))
return self.unique_words
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [seq for seq in self.sequences if word in seq]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
# all_words = set([find_words(seq) for seq in self.sequences])
self.words_in_DB = self.find_words()
avg_word_per_seq = sum([len(words) for words in self.uniques_in_sequences])/len(self.uniques_in_sequences)
word_occs_in_seqs = Counter(self.uniques_in_DB)
avg_seq_per_word = sum(list(word_occs_in_seqs.values()))/len(self.unique_words)
return (len(self.sequences), len(self.unique_words), int(round(avg_word_per_seq)), int(round(avg_seq_per_word)))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_mat = substitution_matrix
self.words_possible = self.generate_words() # All possible amino acid subsequences of length 3
def generate_words(self, word_len=3):
"""Generates all possible amino acid subsequences of length word_len.
Keyword Arguments:
word_len {int} -- Length of the words (default: {3})
Returns:
[List] -- List containing the generated words.
"""
perms = []
combs = itertools.combinations_with_replacement(ALPHABET, word_len)
for c in combs:
perms += [''.join(e) for e in set(itertools.permutations(c, word_len))]
return perms
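# NOTE: itertools.product(ALPHABET, repeat=word_len) would yield the same
# 20**word_len words directly; the combinations-plus-permutations route
# above is an equivalent, if more roundabout, construction.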
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
self.valid_words = []
self.start_positions = []
remaining_words = self.words_possible
reference = (sequence if sequence else pssm)
for pos in range(0, len(reference)-2):
words_found = np.array([(self.calculate_score(pos, word, T, sequence=sequence, pssm=pssm) >= T) for word in remaining_words])
remaining_words = np.array(remaining_words)
self.valid_words += list(np.take(remaining_words, np.where(words_found==True)[0]))
self.start_positions += [pos] * int(np.count_nonzero(words_found))  # one start position per word accepted at this position
remaining_words = list(np.take(remaining_words, np.where(words_found==False)[0])) # Only consider words that didn't pass T at this position.
# map(self.calculate_score(pos, T), self.words_possible, range(0, len(sequence)-2), ) # A failed attempt at being smart
return self.valid_words
# Parameter T is not used and can be removed
def calculate_score(self, start_pos, word, T, sequence=None, pssm=None):
indices = list(range(start_pos, start_pos+len(word)))
sum = 0
if sequence:
# Optimize!
for i in indices:
try:
sum += self.sub_mat[AA_TO_INT[sequence[i]]][AA_TO_INT[word[i-indices[0]]]]
except IndexError:
# print(str(start_pos) + ", " + word + ", " + sequence)
print(len(word), len(sequence))
""" score = sum([self.sub_mat[AA_TO_INT[sequence[indices[0]]]][AA_TO_INT[word[0]]],
self.sub_mat[AA_TO_INT[sequence[indices[1]]]][AA_TO_INT[word[1]]],
self.sub_mat[AA_TO_INT[sequence[indices[2]]]][AA_TO_INT[word[2]]]]) """
else:
# Optimize!
for i in indices:
sum += pssm[i][AA_TO_INT[word[i-indices[0]]]]
""" score = sum([pssm[indices[0]][AA_TO_INT[word[0]]],
pssm[indices[1]][AA_TO_INT[word[1]]],
pssm[indices[2]][AA_TO_INT[word[2]]]]) """
return int(sum)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
# Find target sequences
""" db_sequences = blast_db.get_sequences()
targets = {seq:set() for seq in db_sequences}
for word in self.valid_words:
for db_sequence in db_sequences:
if word in db_sequence:
targets[db_sequence].update(word) """
HSP_scores = []
if query:
self.get_words(sequence=query, T=T)
for i in range(len(self.valid_words)):
# for word in self.valid_words:
word_targets = blast_db.get_sequences(self.valid_words[i])
for wt in word_targets:
HSP_scores.append(self.get_HSP(self.valid_words[i], self.start_positions[i], query, wt, T, X, S, sequence=query, pssm=pssm))
return HSP_scores
""" d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)] """
def get_HSP(self, word, query_start_pos, query, target, T, X, S, sequence=None, pssm=None):
start_pos = query_start_pos
start_pos_target = target.find(word)
# Make more concise!
if sequence:
# First extend right
high_score = new_score = score = self.calculate_score(start_pos, word, T, sequence=sequence)
high_start_pos = start_pos_target
high_end_pos = high_start_pos + len(word)
next_char_pos = start_pos_target+len(word)
score_diff = score-new_score
# score>=T and
while next_char_pos<len(target) and next_char_pos<len(query) and (start_pos+len(word))<len(query) and score_diff<X:
word = word+target[start_pos_target+len(word)]
next_char_pos = start_pos_target+len(word)
score=new_score
new_score = self.calculate_score(start_pos, word, T, sequence=sequence)
score_diff = score-new_score
if new_score > high_score:
high_score = new_score
high_start_pos = start_pos_target
high_end_pos = start_pos_target + len(word)
# Extending left
start_pos_target-=1
score = high_score
word = word[:high_end_pos]
i=1
# score>=T and
while start_pos_target>0 and start_pos-i>0 and score_diff<X:
word = target[start_pos_target]+word
start_pos_target-=1
score=new_score
new_score = self.calculate_score(start_pos-i, word, T, sequence=sequence)
score_diff = score-new_score
i+=1
if new_score > high_score:
high_score = new_score
high_start_pos = start_pos_target
high_end_pos = start_pos_target + len(word)
HSP_candidtate_target = target[high_start_pos:high_end_pos]
HSP_candidtate_query = query[start_pos:start_pos+len(HSP_candidtate_target)]
HSP_score = self.calculate_score(0, HSP_candidtate_target, S, sequence=HSP_candidtate_query)
return HSP_score>=S
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d  # placeholder result; the actual search is not implemented here
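
# --- Usage sketch (illustrative only) ---
# A hypothetical identity-style substitution matrix (+5 on the diagonal,
# 0 elsewhere) stands in for BLOSUM62; with T=15 only the exact 3-mers of
# the query reach the threshold.
if __name__ == '__main__':
    identity = (np.eye(20, dtype=int) * 5).tolist()
    blast = Blast(identity)
    print(sorted(blast.get_words(sequence='MGPRARPAFL', T=15)))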
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) <= 0:
print("No sequences")
raise TypeError("No sequences")
length = len(sequences[0])
for seq in sequences:
if len(seq) != length:
print("Different sequence lengths")
raise TypeError("Different sequence lengths")
for char in seq:
if char not in ALPHABET:
print("Invalid character")
raise TypeError("Invalid character")
self.sequence_count = len(sequences)
self.msa_length = len(sequences[0])
self.primary = sequences[0].replace('-', '')
self.r = np.zeros(self.msa_length)
self.sequences = np.ndarray((self.sequence_count, self.msa_length))
for j, seq in enumerate(sequences):
arr = []
i = 0
for aa in seq:
arr.append(AA_TO_INT[aa])
i += 1
self.sequences[j] = np.array(arr)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
weights = self.get_sequence_weights()
bg_freqs = np.full(21, 0.05)  # uniform background over the 21-symbol alphabet (20 AAs + gap)
if bg_matrix is not None:
bg_freqs = np.sum(bg_matrix, axis=1)
alphabet = 20
if redistribute_gaps:
alphabet = 21
count_matrix = np.zeros((self.msa_length, alphabet))
pssm = np.zeros((self.msa_length, 20))
for row in range(0, alphabet):
for col in range(0, self.msa_length):
current_col = self.sequences[:, col]
count = 0.0
if use_sequence_weights:
#print("AA: ", AA_TO_INT[ALPHABET[row]], "COL: ", current_col)
non_zero = np.nonzero(current_col == AA_TO_INT[ALPHABET[row]])
#print("INDICES: ", non_zero[0])
for index in non_zero[0]:
#print(index)
count += weights[index]
else:
count = np.count_nonzero(current_col == AA_TO_INT[ALPHABET[row]])
count_matrix[col, row] = count
if redistribute_gaps:
for row in range(0, 20):
for col in range(0, self.msa_length):
#print(col, row)
count_matrix[col, row] += count_matrix[col, alphabet-1]*bg_freqs[row]
pseudo_counts = np.zeros((self.msa_length, alphabet))
if add_pseudocounts:
for row in range(0, 20):
for col in range(0, self.msa_length):
current_col = self.sequences[:, col]
total = 0
if bg_matrix is None:
bgf = 0.05/20
else:
x = self.sequences[row][col].astype(int)
#print(x)
bgf = bg_matrix[row][x-1]
for aa in range(0, self.msa_length):
total += (count_matrix[aa, row]/bg_freqs[row]) * bgf
#print(total)
pseudo_counts[col, row] = total
alpha = self.get_number_of_observations()
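# Pseudocount blending: F = (alpha * f + beta * g) / (alpha + beta), with
# alpha taken here as the estimated number of independent observations
# (the earlier MSA implementation in this repo uses N_obs - 1 instead).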
for row in range(0, 20):
for col in range(0, self.msa_length):
a = alpha * count_matrix[col, row]
b = beta * pseudo_counts[col, row]
#print(a, b)
count_matrix[col, row] = (a + b) / (alpha + beta)
for row in range(0, 20):
for col in range(0, self.msa_length):
count = count_matrix[col, row]
if redistribute_gaps:
sum_count = self.sequence_count
elif use_sequence_weights:
sum_count = np.sum(count_matrix[col])
else:
sum_count = np.count_nonzero(self.sequences[:, col] != 20)
if count == 0:
print("Row: ", row, "Col: ", col)
print("COUNT ", count, " SUM: ", sum_count, " FREQ: ", bg_freqs[row])
pssm[col, row] = 2 * np.log2((count / sum_count) / bg_freqs[row])
if np.isneginf(pssm[col, row]):
pssm[col, row] = -20
deletions = 0
for col in range(0, self.msa_length):
if self.sequences[0, col] == 20.0:
pssm = np.delete(pssm, col-deletions, 0)
deletions += 1
#print(pssm)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.sequence_count, self.msa_length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.primary
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.sequence_count)
#print("Shape: ", np.shape(self.sequences))
#print("Length: ", self.msa_length, " Count: ", self.sequence_count)
s = np.zeros(np.shape(self.sequences))
for i in range(0, self.msa_length):
length = len(np.unique(self.sequences[:, i]))
self.r[i] = length
#print(self.r)
for col in range(0, self.msa_length):
for row in range(0, self.sequence_count):
s[row, col] = np.count_nonzero(self.sequences[:, col] == self.sequences[row, col])
#print(s)
single_weights = np.zeros(np.shape(self.sequences))
for row in range(0, self.sequence_count):
for col in range(0, self.msa_length):
if self.r[col] > 1:
single_weights[row, col] = 1 / (self.r[col] * s[row, col])
#print(single_weights)
weights = np.sum(single_weights, axis=1)
#print(weights)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = (1 / self.msa_length) * np.sum(self.r)
#print(num_obs)
return num_obs.astype(np.float64)
valid = [
"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLK------------VAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----------MGEIAVGLNKGHQVTKKA----------GTPRPSRRKGFLSQRVKKVRAVVREVAGWAPYERRVMELLKVGKD---KRALKMCKRKLGTHMRGKKKREEMAGVLRKMQAASKGE---------",
"----MAPKQPNTGLFVGLNKGHIVTKKE----------LAPRPSDRKGKTSKRTHFVRNLIREVAGFAPYEKRITELLKVGKD---KRALKVRQEKVGHSQESKEEER--GDVQCSP--------PDEGWWWY",
"---------MAPGLVVGLNKGKVLTKRQ----------LPERPSRRKGQLSKRTSFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MGVQYKLAVGLGKGHKVTKNE----------YKPRPSRRKGALSKHTRFVRDLIREVCGFAPFERRAMELLKVSKD---KRALKFIKKRLGTHLRGKRKRDELSNVLVAQRKAAAHKEKTEHK---",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------GKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"---------MAPGLVVGLNKGKTLTKRQ----------LPERPSRRKGHLSKRTAFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MAIRYPMAVGLNKGHKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCAFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGYKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MVVRYPMAVGLNKGHKVTKNV----------SKPKHSRRRGRLTKHAKFARDLIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNTLAAMRKAAAKKE--------",
"-------MAIRYPMAVGLKKGHPVTKNV----------TKPKHSRRGGRLTKHSKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNILAAMRKAAAKKE--------",
"---MAKEAPAKTGLAVGLNKGHKTTARV----------VKPRVSRTKGHLSKRTAFVREVVKEVAGLAPYERRVIELLRNSKD---KRARKLAKKRLGTFGRAKRKVDELQRVIAESRRAH------------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRSRLTNHTKFVRDMIREVCGFAPYERRAMELLKVSKS---KRALKFIKKRVGTHIRAKRKREELSNVLAAMEEAAAKKD--------",
"-----MSGPGIEGLAVGLNKGHAATQLP----------VKQRQNRHKGVASKKTKIVRELVREITGFAPYERRVLEMLRISKD---KRALKFLKRRIGTHRRAKGKREELQNVIIAQRKAHK-----------",
"--------MAKSGIAAGVNKGRKTTAKE----------VAPKISYRKGASSQRTVFVRSIVKEVAGLAPYERRLIELIRNAGE---KRAKKLAKKRLGTHKRALRKVEEMTQVIAESRRH-------------",
"-------MAVRYELAIGLNKGHKTSKIRNVKYTGDKKVKGLRGSRLKNIQTRHTKFMRDLVREVVGHAPYEKRTMELLKVSKD---KRALKFLKRRLGTHIRAKRKREELSNILTQLRKAQTHAK--------",
"-------MAVKTGIAIGLNKGKKVTQMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"-------MTVKTGIAIGLNKGKKVTSMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"---------MAKGQAVGINKGFITTQLE-------KKLQKHSAVQRKGKLGKRVALVRQVIREVTGFAPYEKRIIELIKAGSAKDSKKATKIARKRLGTHRRAKVKKALLEEAVRAQRKK-------------",
"MSSAATKPVKRSGIIKGFNKGHAVAKRT------------VTSTFKKQVVTKRVAAIRDVIREISGFSPYERRVSELLKSGLD---KRALKVAKKRLGSIQAGKKKRDDIANINRKASAK-------------",
"MKNA--------------------YKKVRVRYPVKRPDVKRKQRGPRAETQESRFLAAAVADEISGLSPLEKKAISLLEAKNN---NKAQKLLRKRLGSHKRAVAKVEKLARMLLEK----------------"
]
invalid1 = []
msa = MSA(valid)
msa.get_sequence_weights()
msa.get_number_of_observations()
msa.get_pssm(use_sequence_weights=True)
<file_sep>import numpy as np
import os
import pandas as pd
from selenium import webdriver
import sys
ex=sys.argv[1]
students=dict()
all_students = pd.read_csv('allstudents.csv', sep=',', header=None)
for s in all_students.values:
tmp={s[0]: s[1]}
students.update(tmp)
agg=''
students_1=pd.read_csv('students_'+str(ex)+'.csv', sep=',',header=None)
for s in students_1.values:
agg+=students[s[0]]
agg+=','
agg=agg[:-1]
print(agg)
'''
driver = webdriver.Firefox()
driver.get("https://artemis.ase.in.tum.de/#/course/28/exercise/476/dashboard")
button1 = driver.find_element_by_xpath("//span[contains(text(),'Download Repos')]")
driver.execute_script("arguments[0].click();", button1)
username = driver.find_element_by_name('studentIds').send_keys(agg)
button2 = driver.find_element_by_xpath("//span[contains(text(),'Download Repositories')]")
driver.execute_script("arguments[0].click();", button2)
'''
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
import os
from Bio.PDB import Selection
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.PDB.Polypeptide import Polypeptide, PPBuilder
from Bio.PDB.PDBExceptions import PDBConstructionWarning
import warnings
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
with warnings.catch_warnings():
warnings.simplefilter('ignore', PDBConstructionWarning)
self.structure = self.CIF_PARSER.get_structure("7AHL", path) # Parse the structure once and re-use it in the functions below
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
n_chains = len(list(self.structure.get_chains()))
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
ppb = PPBuilder()
sequence = ppb.build_peptides(self.structure[0][chain_id])[0].get_sequence()
return str(sequence)  # convert the Bio.Seq object to the plain string promised above
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
n_waters = 0
chain = self.structure[0][chain_id]
for residue in chain.get_list():
hetfield = residue.get_id()[0]
if hetfield == "W":
n_waters += 1
return n_waters
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
residue1 = self.structure[0][chain_id_1][index_1]["CA"]
residue2 = self.structure[0][chain_id_2][index_2]["CA"]
ca_distance = residue1 - residue2
return int(ca_distance)
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
sequence = self.get_sequence(chain_id)
length = len(sequence)
chain = self.structure[0][chain_id]
amino = lambda residue: "W" not in residue.get_id()[0]
contact_map = np.zeros((length, length), dtype=np.float32)
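# Bio.PDB overloads the '-' operator on Atom objects to return the Euclidean
# distance between their coordinates, so residue["CA"] - residue2["CA"]
# below yields the C-alpha distance directly.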
for idx, residue in enumerate(filter(amino, chain.get_list())):
for idx2, residue2 in enumerate(filter(amino, chain.get_list())):
contact_map[idx][idx2] = residue["CA"] - residue2["CA"]
return contact_map.astype(int) # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
amino = lambda residue: "W" not in residue.get_id()[0]
length = len(self.get_sequence(chain_id))
chain = self.structure[0][chain_id]
factors = np.zeros(length, dtype=np.float32)
acids = filter(amino, chain.get_list())
for idx, residue in enumerate(acids):
atom_factors = np.zeros(len(residue))
for idx2, atom in enumerate(residue.get_atoms()):
atom_factors[idx2] = atom.get_bfactor()
factors[idx] = np.nanmean(atom_factors)
factors -= np.nanmean(factors)
factors /= np.nanstd(factors)  # nan-aware, as the docstring requires
return factors.astype(int) # return rounded (integer) values
def main():
print('PDB parser class.')
obj = PDB_Parser("tests/7ahl.cif")
print("number of chains:", obj.get_number_of_chains())
print("sequence:", len(obj.get_sequence("C")))
print(obj.get_number_of_water_molecules("D"))
print(obj.get_ca_distance("A", 20, "A", 55))
# ca_result1 = obj.get_contact_map("A")
# ca_result2 = obj.get_contact_map("B")
# ca_result3 = obj.get_contact_map("C")
# ca_result4 = obj.get_contact_map("D")
# ca_result5 = obj.get_contact_map("E")
# cmap1= np.load("tests/contact_map_1.npy")
# cmap2 = np.load("tests/contact_map_2.npy")
# cmap3 = np.load("tests/contact_map_3.npy")
# cmap4 = np.load("tests/contact_map_4.npy")
# cmap5 = np.load("tests/contact_map_5.npy")
#
# print("A", np.array_equal(ca_result1, cmap1))
# print("B", np.array_equal(ca_result2, cmap2))
# print("C", np.array_equal(ca_result3, cmap3))
# print("D", np.array_equal(ca_result4, cmap4))
# print("E", np.array_equal(ca_result5, cmap5))
result = obj.get_bfactors("A")
print(len(result))
bfactors1 = np.load("tests/bfactors_1.npy")
bfactors2 = np.load("tests/bfactors_2.npy")
bfactors3 = np.load("tests/bfactors_3.npy")
bfactors4 = np.load("tests/bfactors_4.npy")
bfactors5 = np.load("tests/bfactors_5.npy")
print(len(bfactors1), len(bfactors2), len(bfactors3), len(bfactors4), len(bfactors5))
b_result1 = obj.get_bfactors("A")
b_result2 = obj.get_bfactors("B")
b_result3 = obj.get_bfactors("C")
b_result4 = obj.get_bfactors("D")
b_result5 = obj.get_bfactors("E")
print(np.array_equal(bfactors1, b_result1))
print(np.array_equal(bfactors2, b_result2))
print(np.array_equal(bfactors3, b_result3))
print(np.array_equal(bfactors4, b_result4))
print(np.array_equal(bfactors5, b_result5))
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from pathlib import Path
import time
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db_list = []
self.alphabet = ALPHABET
self.possible_words = set()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
sequence_words = set()
for i in range(len(sequence)-2):
sequence_words.add(sequence[i:i+3])
self.possible_words.update(sequence_words)
self.db_list.append((sequence, sequence_words))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [e[0] for e in self.db_list if word in e[1]]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
n_seq = len(self.db_list)
n_words = len(self.possible_words)
avg_words = 0
for s in self.db_list:
avg_words += len(s[1])
avg_words /= n_seq
avg_seq = 0
for w in self.possible_words:
for s in self.db_list:
if w in s[1]:
avg_seq += 1
avg_seq /= n_words
return tuple((n_seq, n_words, round(avg_words), round(avg_seq)))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
possible_words = []
accepted_words_set = set()
for c1 in ALPHABET:
for c2 in ALPHABET:
for c3 in ALPHABET:
possible_words.append(c1+c2+c3)
if sequence is not None:
for w in possible_words:
for p in range(len(sequence)-2):
v0 = self.sub_matrix[AA_TO_INT[sequence[p]]] [AA_TO_INT[w[0]]]
v1 = self.sub_matrix[AA_TO_INT[sequence[p+1]]] [AA_TO_INT[w[1]]]
v2 = self.sub_matrix[AA_TO_INT[sequence[p+2]]] [AA_TO_INT[w[2]]]
if v0 + v1 + v2 >= T:
accepted_words_set.add(w)
else:
if pssm is not None:
for w in possible_words:
for p in range(len(pssm)-2):
v0 = pssm[p] [AA_TO_INT[w[0]]]
v1 = pssm[p+1] [AA_TO_INT[w[1]]]
v2 = pssm[p+2] [AA_TO_INT[w[2]]]
if v0 + v1 + v2 >= T:
accepted_words_set.add(w)
return list(accepted_words_set)
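    # Worked example for the threshold test above (hypothetical substitution
    # scores): if sub[Q][Q] = 5, sub[K][R] = 2 and sub[D][D] = 6, then at a
    # query position reading "QKD" the word "QRD" is accepted iff
    # 5 + 2 + 6 = 13 >= T.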
def get_words_indices(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
        :return: Dict mapping each word to the list of query positions
                 where the word scores >= T.
        """
possible_words = []
accepted_words = dict()
for c1 in ALPHABET:
for c2 in ALPHABET:
for c3 in ALPHABET:
possible_words.append(c1+c2+c3)
if sequence is not None:
for w in possible_words:
accepted_words[w] = []
for p in range(len(sequence)-2):
v0 = self.sub_matrix[AA_TO_INT[sequence[p]]] [AA_TO_INT[w[0]]]
v1 = self.sub_matrix[AA_TO_INT[sequence[p+1]]] [AA_TO_INT[w[1]]]
v2 = self.sub_matrix[AA_TO_INT[sequence[p+2]]] [AA_TO_INT[w[2]]]
if v0 + v1 + v2 >= T:
accepted_words[w].append(p)
else:
if pssm is not None:
for w in possible_words:
accepted_words[w] = []
for p in range(len(pssm)-2):
v0 = pssm[p] [AA_TO_INT[w[0]]]
v1 = pssm[p+1] [AA_TO_INT[w[1]]]
v2 = pssm[p+2] [AA_TO_INT[w[2]]]
if v0 + v1 + v2 >= T:
accepted_words[w].append(p)
return accepted_words
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
words = self.get_words(sequence = query, pssm = pssm, T = T )
word_indices = self.get_words_indices (sequence = query, pssm = pssm, T = T)
for w in words:
candidate_sequences = blast_db.get_sequences(w)
for s in candidate_sequences:
HSPs = set()
indices_candidate = []
for i in range(len(s)-2):
if s[i] == w[0] and s[i+1] == w[1] and s[i+2] == w[2]:
indices_candidate.append(i)
indices_query = []
if w in word_indices:
indices_query = word_indices[w]
if query is not None:
for i in indices_candidate:
for j in indices_query:
beginning_c = i
end_c = i+2
beginning_q = j
end_q = j+2
score = 0
score += self.sub_matrix [AA_TO_INT[query[beginning_q ]]] [AA_TO_INT[s[beginning_c ]]]
score += self.sub_matrix [AA_TO_INT[query[beginning_q+1]]] [AA_TO_INT[s[beginning_c+1]]]
score += self.sub_matrix [AA_TO_INT[query[beginning_q+2]]] [AA_TO_INT[s[beginning_c+2]]]
#Moving HSP window right
max_score = score
overstepped = 0
while end_c < len(s) - 1 and end_q < len(query) - 1:
end_q = end_q+1
end_c = end_c+1
nextscore = self.sub_matrix [AA_TO_INT[query[end_q]]] [AA_TO_INT[s[end_c]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
end_q = end_q - overstepped
end_c = end_c - overstepped
score = max_score
#Moving HSP window left
max_score = score
overstepped = 0
while beginning_c > 0 and beginning_q > 0:
beginning_q = beginning_q-1
beginning_c = beginning_c-1
nextscore = self.sub_matrix [AA_TO_INT[query[beginning_q]]] [AA_TO_INT[s[beginning_c]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
beginning_q = beginning_q + overstepped
beginning_c = beginning_c + overstepped
score = max_score
if score >= S:
HSPs.add((beginning_q, beginning_c, end_c - beginning_c + 1, score))
elif pssm is not None:
for i in indices_candidate:
for j in indices_query:
beginning_c = i
end_c = i+2
beginning_q = j
end_q = j+2
score = 0
score += pssm[beginning_q] [AA_TO_INT[s[beginning_c ]]]
score += pssm[beginning_q+1] [AA_TO_INT[s[beginning_c+1]]]
score += pssm[beginning_q+2] [AA_TO_INT[s[beginning_c+2]]]
#Moving HSP window right
max_score = score
overstepped = 0
while end_c < len(s) - 1 and end_q < len(pssm) - 1:
end_q = end_q+1
end_c = end_c+1
nextscore = pssm[end_q] [AA_TO_INT[s[end_c]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
end_q = end_q - overstepped
end_c = end_c - overstepped
score = max_score
#Moving HSP window left
max_score = score
overstepped = 0
while beginning_c > 0 and beginning_q > 0:
beginning_q = beginning_q-1
beginning_c = beginning_c-1
nextscore = pssm[beginning_q] [AA_TO_INT[s[beginning_c]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
beginning_q = beginning_q + overstepped
beginning_c = beginning_c + overstepped
score = max_score
if score >= S:
HSPs.add((beginning_q, beginning_c, end_c - beginning_c + 1, score))
if len(HSPs) > 0:
if s not in d:
d[s] = HSPs
else:
d[s] = d[s].union(HSPs)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
words = self.get_words(sequence = query, pssm = pssm, T = T)
word_indices = self.get_words_indices (sequence = query, pssm = pssm, T = T)
        sequences = blast_db.db_list
        for s, word_list in sequences:
            used_words_indexes = set()
            calced_HSP = set()
for i in range(len(s)-2):
w1 = s[i] + s[i+1] + s[i+2]
for j in range(3, A):
if i+j+2 < len(s):
w2 = s[i+j] + s[i+j+1] + s[i+j+2]
t1 = i
t2 = i+j
if (t1 in used_words_indexes) or (t1 - 1) in used_words_indexes or t1-2 in used_words_indexes or (t2 in used_words_indexes or t2-1 in used_words_indexes or t2-2 in used_words_indexes):
continue
word_1_query = []
if w1 in word_indices:
word_1_query = word_indices[w1]
if not word_1_query:
continue
word_2_query = []
if w2 in word_indices:
word_2_query = word_indices[w2]
if not word_2_query:
continue
if query is not None:
for q1 in word_1_query:
for q2 in word_2_query:
if (q1, q2) not in calced_HSP:
calced_HSP.add((q1,q2))
if abs(t1-t2) >= 3 and abs(t1-t2) <= A and t1-t2 == q1-q2:
if q1 < q2:
left_q_begin = q1
right_q_begin = q2
left_t_begin = t1
right_t_begin = t2
else:
right_q_begin = q2
left_q_begin = q1
left_t_begin = t2
right_t_begin = t1
left_q_end = left_q_begin + 2
right_q_end = right_q_begin + 2
left_t_end = left_t_begin + 2
right_t_end = right_t_begin + 2
score = self.sub_matrix [AA_TO_INT[query[right_q_begin ]]] [AA_TO_INT[s[right_t_begin ]]] + self.sub_matrix [AA_TO_INT[query[right_q_begin+1]]] [AA_TO_INT[s[right_t_begin+1]]] + self.sub_matrix [AA_TO_INT[query[right_q_begin+2]]] [AA_TO_INT[s[right_t_begin+2]]]
#Expanding right window to the left
max_score = score
overstepped = 0
while right_t_begin > 0 and right_q_begin > 0:
right_q_begin = right_q_begin-1
right_t_begin = right_t_begin-1
nextscore = self.sub_matrix [AA_TO_INT[query[right_q_begin]]] [AA_TO_INT[s[right_t_begin]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
right_q_begin = right_q_begin + overstepped
right_t_begin = right_t_begin + overstepped
score = max_score
if right_q_begin <= left_q_end and right_t_begin <= left_t_end:
q_begin = right_q_begin
q_end = right_q_end
t_begin = right_t_begin
t_end = right_t_end
#Expanding window to the right
max_score = score
overstepped = 0
while t_end < len(s) - 1 and q_end < len(query) - 1:
q_end = q_end+1
t_end = t_end+1
nextscore = self.sub_matrix [AA_TO_INT[query[q_end]]] [AA_TO_INT[s[t_end]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
q_end = q_end - overstepped
t_end = t_end - overstepped
score = max_score
if score >= S:
HSPs = set()
HSPs.add((q_begin, t_begin, t_end - t_begin + 1, score))
if s not in d:
d[s] = HSPs
else:
d[s] = d[s].union(HSPs)
used_words_indexes.add(t1)
used_words_indexes.add(t2)
"""
if s == sequence:
print((q_begin, t_begin, t_end - t_begin + 1, score, w1, w2, t1, t2, right_q_begin, left_q_begin))
"""
elif pssm is not None:
for q1 in word_1_query:
for q2 in word_2_query:
if abs(t1-t2) >= 3 and abs(t1-t2) <= A and t1-t2 == q1-q2:
if q1 < q2:
left_q_begin = q1
right_q_begin = q2
left_t_begin = t1
right_t_begin = t2
else:
right_q_begin = q2
left_q_begin = q1
left_t_begin = t2
right_t_begin = t1
left_q_end = left_q_begin + 2
right_q_end = right_q_begin + 2
left_t_end = left_t_begin + 2
right_t_end = right_t_begin + 2
score = 0
score += pssm[right_q_begin ] [AA_TO_INT[s[right_t_begin ]]]
score += pssm[right_q_begin+1] [AA_TO_INT[s[right_t_begin+1]]]
score += pssm[right_q_begin+2] [AA_TO_INT[s[right_t_begin+2]]]
#Expanding right window to the left
max_score = score
overstepped = 0
while right_t_begin > 0 and right_q_begin > 0:
right_q_begin = right_q_begin-1
right_t_begin = right_t_begin-1
nextscore = pssm[right_q_begin] [AA_TO_INT[s[right_t_begin]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
right_q_begin = right_q_begin + overstepped
right_t_begin = right_t_begin + overstepped
score = max_score
if right_q_begin <= left_q_end and right_t_begin <= left_t_end:
q_begin = right_q_begin
q_end = right_q_end
t_begin = right_t_begin
t_end = right_t_end
#Expanding window to the right
max_score = score
overstepped = 0
while t_end < len(s) - 1 and q_end < len(pssm) - 1:
q_end = q_end+1
t_end = t_end+1
nextscore = pssm[q_end] [AA_TO_INT[s[t_end]]]
score += nextscore
if score > max_score:
max_score = score
overstepped = 0
elif score > max_score - X:
overstepped += 1
else:
overstepped += 1
break
if overstepped > 0:
q_end = q_end - overstepped
t_end = t_end - overstepped
score = max_score
if score >= S:
HSPs = set()
HSPs.add((q_begin, t_begin, t_end - t_begin + 1, score))
if s not in d:
d[s] = HSPs
else:
d[s] = d[s].union(HSPs)
used_words_indexes.add(t1)
used_words_indexes.add(t2)
return d<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from collections import Counter
code_dict = {
'GCT': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'CGT': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'AGA': 'R',
'AGG': 'R',
'AAT': 'N',
'AAC': 'N',
'GAT': 'D',
'GAC': 'D',
'TGT': 'C',
'TGC': 'C',
'CAA': 'Q',
'CAG': 'Q',
'GAA': 'E',
'GAG': 'E',
'GGT': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G',
'CAT': 'H',
'CAC': 'H',
'ATT': 'I',
'ATC': 'I',
'ATA': 'I',
'CTT': 'L',
'CTC': 'L',
'CTA': 'L',
'CTG': 'L',
'TTA': 'L',
'TTG': 'L',
'AAA': 'K',
'AAG': 'K',
'ATG': 'M',
'TTT': 'F',
'TTC': 'F',
'CCT': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'TCT': 'S',
'TCC': 'S',
'TCA': 'S',
'TCG': 'S',
'AGT': 'S',
'AGC': 'S',
'ACT': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'TGG': 'W',
'TAT': 'Y',
'TAC': 'Y',
'GTT': 'V',
'GTC': 'V',
'GTA': 'V',
'GTG': 'V',
'TAA': '',
'TAG': '',
'TGA': ''
}
COMPLEMENTS = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
def codons_to_aa(orf):
orf = orf.upper()
codon_length = 3
amino_acids = []
for i in range(0, len(orf), codon_length):
amino_acids.append(code_dict[orf[i:i+codon_length]])
return ''.join(amino_acids)
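# Example: codons_to_aa("ATGGCC") -> "MA" (ATG = Met, GCC = Ala); stop codons
# map to the empty string and simply vanish from the translated output.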
def aa_dist(aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def complementary(strand):
return ''.join(COMPLEMENTS[character] for character in strand.upper())
def get_orfs(genome):
genome = genome.upper()
chars = {"A","T","G","C"}
if any((c not in chars) for c in genome):
raise TypeError('ERROR')
stops = ["TAA", "TGA", "TAG"]
start = ["ATG"]
start_index = -1
result = []
double_genome = genome + genome
# forward
for k in range(0,3):
for i in range(0, len(double_genome) -k -3, 3):
current_codon = double_genome[i + k : i + 3 + k]
# takes the earliest start codon
if current_codon in start and start_index == -1:
start_index = i + k
if current_codon in stops and start_index != -1:
stop_index = i + k
# TODO check here if stop codon is counted as codon in the protein
if ((stop_index - start_index) / 3) + 1 > 34:
tup = (start_index % (len(double_genome)//2), stop_index % (len(double_genome)//2) + 2, codons_to_aa(double_genome[start_index:stop_index]), False)
result.append(tup)
start_index = -1
start_index = -1
# eliminate doubles
new_result = []
for tup in result:
val = True
for check in result:
if tup[1] == check[1] and len(tup[2]) < len(check[2]):
val = False
break
if val:
new_result.append(tup)
# print(new_result)
# backward
comp_dna_seq = complementary(double_genome)[::-1]
dna_length = len(comp_dna_seq) - 1
start_index = -1
result_backward = []
for k in range(0,3):
for i in range(0, len(comp_dna_seq) - k - 3, 3):
current_codon = comp_dna_seq[i + k : i + 3 + k]
if current_codon in start and start_index == -1:
start_index = dna_length - (i + k)
if current_codon in stops and start_index != -1:
stop_index = dna_length - (i + k)
if ((start_index - stop_index) / 3) + 1 > 34:
tup = (start_index % (len(double_genome)//2), stop_index % (len(double_genome)//2) - 2, codons_to_aa(comp_dna_seq[dna_length - start_index: dna_length - stop_index]), True)
result_backward.append(tup)
start_index = -1
start_index = -1
# eliminate doubles
new_result_backward = []
for tup in result_backward:
val = True
for check in result_backward:
if tup[1] == check[1] and len(tup[2]) < len(check[2]):
val = False
break
if val:
new_result_backward.append(tup)
return new_result + new_result_backward
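# Each returned tuple is (start index, stop index, protein sequence,
# reverse-strand flag); the genome is doubled above so that ORFs wrapping
# around a circular genome are found as well.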
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.max_row = 0
self.max_column = 0
self.alignment = ('', '')
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.compute_score_matrix()
self.compute_alignment()
def compute_score_matrix(self):
"""
Initialize the score matrix.
Fill the first row and the first column with zeroes,
then calculate the top score for each empty cell starting
from top left.
"""
max_value = 0
for row in range(1, len(self.string2) + 1):
for column in range(1, len(self.string1) + 1):
match = (
self.score_matrix[row - 1, column - 1]
+ self.substitution_matrix[self.string1[column - 1]][self.string2[row - 1]]
)
string1_insertion = (
self.score_matrix[row - 1, column] + self.gap_penalty
)
string2_insertion = (
self.score_matrix[row, column - 1] + self.gap_penalty
)
new_value = max(
match, string1_insertion, string2_insertion, 0
)
if new_value > max_value:
max_value = new_value
self.max_row = row
self.max_column = column
self.score_matrix[row, column] = new_value
def compute_alignment(self):
string1_aligned = []
string1_aligned_indices = []
string2_aligned = []
string2_aligned_indices = []
row = self.max_row
column = self.max_column
while self.score_matrix[row, column] > 0:
if (row > 0 and
column > 0 and
self.score_matrix[row, column] == self.score_matrix[row - 1, column - 1]
+ self.substitution_matrix[self.string1[column - 1]][self.string2[row - 1]]):
string1_aligned.append(self.string1[column - 1])
string1_aligned_indices.append(column - 1)
string2_aligned.append(self.string2[row - 1])
string2_aligned_indices.append(row - 1)
row -= 1
column -= 1
elif (row > 0 and
self.score_matrix[row, column] == self.score_matrix[row - 1, column]
+ self.gap_penalty):
# insertion into string1
string1_aligned.append('-')
string2_aligned.append(self.string2[row - 1])
string2_aligned_indices.append(row - 1)
row -= 1
else:
# insertion into string2
string1_aligned.append(self.string1[column - 1])
string1_aligned_indices.append(column - 1)
string2_aligned.append('-')
column -= 1
string1_aligned.reverse()
string2_aligned.reverse()
self.alignment = (''.join(string1_aligned), ''.join(string2_aligned))
self.string1_aligned_indices = string1_aligned_indices
self.string2_aligned_indices = string2_aligned_indices
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.alignment != ('', '')
def get_alignment(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignment
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
return residue_index in self.string1_aligned_indices
if string_number == 2:
return residue_index in self.string2_aligned_indices
return False
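# Usage sketch (hypothetical 4-letter identity matrix; the exercise tests
# supply their own substitution matrices):
#
#   identity = {a: {b: 1 if a == b else -1 for b in "ACGT"} for a in "ACGT"}
#   la = LocalAlignment("ACGT", "CG", -2, identity)
#   la.has_alignment()   # -> True
#   la.get_alignment()   # -> ("CG", "CG")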
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.trace = {}
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# Initialize
self.score_matrix[0][0] = 0
for row in range(len(self.string2) + 1):
self.score_matrix[row][0] = row * self.gap_penalty
for col in range(len(self.string1) + 1):
self.score_matrix[0][col] = col * self.gap_penalty
for row in range(1, len(self.string2) + 1):
for col in range(1, len(self.string1) +1):
diag = self.score_matrix[row-1][col-1] + self.substituion_matrix[self.string2[row-1]][self.string1[col-1]]
up = self.score_matrix[row-1][col] + self.gap_penalty
left = self.score_matrix[row][col-1] + self.gap_penalty
directions = [diag, up, left]
max_indices = np.where(directions == np.max(directions))
self.score_matrix[row][col] = directions[max_indices[0][0]]
if (row, col) not in self.trace.keys():
self.trace[(row, col)] = []
for max_index in max_indices[0]:
if max_index == 0:
self.trace[(row, col)].append("d")
elif max_index == 1:
self.trace[(row, col)].append("v")
elif max_index == 2:
self.trace[(row, col)].append("h")
else:
print ("something strange happened! ({})".format(max_index))
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1][-1]
def find_alignments(self, row, col, alignment):
str1, str2 = alignment
alignments = []
if row == 0 and col == 0:
return [alignment]
# First, check left, up and diag if we can come from them
tmp_str1 = str1
tmp_str2 = str2
for direction in self.trace[(row, col)]:
str1 = tmp_str1
str2 = tmp_str2
if direction == "h":
# We come from left
str1 = self.string1[col-1] + str1
str2 = "-" + str2
alignments += self.find_alignments(row, col-1, (str1, str2))
elif direction == "v":
# We come from up
str1 = "-" + str1
str2 = self.string2[row-1] + str2
alignments += self.find_alignments(row-1, col, (str1, str2))
elif direction == "d":
# We come frome diag
str1 = self.string1[col-1] + str1
str2 = self.string2[row-1] + str2
alignments += self.find_alignments(row-1, col-1, (str1, str2))
"""
left = self.score_matrix[row][col-1] + self.gap_penalty
if left == self.score_matrix[row][col]:
# We can come from left
str1 = self.string1[col-1] + str1
str2 = "-" + str2
print ("left")
alignments += self.find_alignments(row, col-1, (str1, str2))
up = self.score_matrix[row-1][col] + self.gap_penalty
if up == self.score_matrix[row][col]:
# We can come from up
str1 = "-" + str1
str2 = self.string2[row-1] + str2
print("up")
alignments += self.find_alignments(row-1, col, (str1, str2))
diag = self.score_matrix[row-1][col-1] + self.substituion_matrix[self.string2[row-1]][self.string1[col-1]]
if diag == self.score_matrix[row][col]:
# We can come frome diag
str1 = self.string1[col-1] + str1
str2 = self.string2[row-1] + str2
print("diag")
alignments += self.find_alignments(row-1, col-1, (str1, str2))
"""
return alignments
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
        alignments = self.find_alignments(len(self.score_matrix) - 1, len(self.score_matrix[0]) - 1, ("", ""))
        return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
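# Usage sketch (hypothetical identity scores; the exercise tests use their
# own matrices):
#
#   identity = {a: {b: 1 if a == b else -1 for b in "AW"} for a in "AW"}
#   ga = GlobalAlignment("AW", "AW", -1, identity)
#   ga.get_best_score()             # -> 2
#   ga.get_number_of_alignments()   # -> 1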
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
from itertools import groupby
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return int(len(self.__sequences))
def get_average_length(self):
total_length = 0.0
for seq in self.__sequences:
total_length += len(seq)
average_length = float(total_length/self.get_counts())
return float(average_length)
def read_fasta(self, path):
# f = open(path, "r")
# groups = groupby(f, key=lambda line: not(line.startswith(">") or line.startswith(";")))
# for key, group in groups:
# if not key:
# seq = list(group)[0].strip(), ''.join(map(str.rstrip, next(groups)[1],'')).rstrip('*')
# self.__sequences.append(seq)
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.__sequences.append(seq.rstrip('*'))
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(seq.rstrip('*'))
def get_abs_frequencies(self):
# return number of occurences not normalized by length
proteins = ""
for seq in self.__sequences:
proteins += seq
occurrences = Counter(proteins)
abs_frequencies = dict(occurrences.most_common())
return abs_frequencies
def get_av_frequencies(self):
# return number of occurences normalized by length
total_length = 0.0
for seq in self.__sequences:
total_length += len(seq)
aa_av_frequencies = self.get_abs_frequencies()
for aa, count in aa_av_frequencies.items():
#average = float(1/(count/total_length))
average = float(count/total_length)
aa_av_frequencies[aa] = average
return aa_av_frequencies
# def main():
# dist = AADist("./tests/tests.fasta")
# print("get_counts(): " + str(dist.get_counts()))
# print("get_average_length(): " + str(dist.get_average_length()))
# print("get_abs_frequencies(): ")
# print(dist.get_abs_frequencies())
# print("get_av_frequencies(): ")
# print(dist.get_av_frequencies())
# main()
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
# score matrix has len(string2)+1 rows, len(string1)+1 cols
row, col = self.score_matrix.shape
self.score_matrix[:,0] = 0
self.score_matrix[0,:] = 0
for i in range(1,row,1): # string2
for j in range(1,col,1): #string1
s1 = self.string1[j-1]
s2 = self.string2[i-1]
score = self.substitution_matrix[s1][s2]
candidate = np.asarray([0,self.score_matrix[i,j-1]+self.gap_penalty,self.score_matrix[i-1,j]+self.gap_penalty,self.score_matrix[i-1,j-1]+score])
self.score_matrix[i,j] = np.max(candidate)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
        return np.max(self.score_matrix) > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if self.has_alignment():
result = []
def trace_back(i,j,align1,align2):
# stop point
if self.score_matrix[i,j] ==0:
self.i = i
self.j = j
result.append((align1,align2))
else:
# print(i,j)
current_score = self.score_matrix[i,j]
if current_score == self.score_matrix[i-1,j]+self.gap_penalty: trace_back(i-1,j,"-"+align1,self.string2[i-1]+align2)
if current_score == self.score_matrix[i,j-1] + self.gap_penalty: trace_back(i,j-1,self.string1[j-1]+align1,"-"+align2)
if current_score == self.score_matrix[i-1,j-1] + self.substitution_matrix[self.string1[j-1]][self.string2[i-1]]: trace_back(i-1,j-1,self.string1[j-1]+align1,self.string2[i-1]+align2)
row,col = np.where(self.score_matrix == np.max(self.score_matrix))
# print(row,col)
trace_back(row[0],col[0],"","")
return result[0]
else:return ("","")
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
self.get_alignment()
row, col = np.where(self.score_matrix == np.max(self.score_matrix))
start1 = self.j
stop1 = col[0]-1
start2 = self.i
stop2= row[0]-1
if string_number ==1:
if residue_index>= start1 and residue_index<= stop1: return True
else:return False
else:
if residue_index>= start2 and residue_index<= stop2: return True
else:return False
return False
if __name__ == '__main__':
from tests.matrices import MATRICES
import json
with open(
"/Users/wangyu/Documents/Protein_prediction/exercise/pp1ss19exercise3-exercise-ge56sen/tests/local_test.json") as json_file:
json_data = json.load(json_file)
# l = LocalAlignment("ARNDCEQGHI","LKMFPSTWYV",-6,MATRICES["identity"])
# b = l.score_matrix
# print(b)
# b = l.get_alignment()
l = LocalAlignment("ARNDCEQGHI","DDCEQHG",-6,MATRICES["blosum"])
b = l.score_matrix
print(b)
b = l.get_alignment()
print(b)
<file_sep>##############
# Exercise 2.7
##############
"""
The goal of this exercise is to introduce the different physicochemical properties of amino acids.
To do so, you are asked to write several small functions in Python.
All functions take a single letter amino acid code symbol and return True or False depending if
they have the respective property.
Implement the following functions:
• isCharged
• isPositivelyCharged
• isNegativelyCharged
• isHydrophobic
• isAromatic
• isPolar
• isProline
• containsSulfur
• isAcid
• isBasic
"""
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
plus=['R', 'H', 'K']
return aa in plus
def isNegativelyCharged(aa):
minus=['D', 'E']
return aa in minus
def isHydrophobic(aa):
hydrophob= ['V', 'F', 'M', 'L', 'I', 'A', 'Y', 'W']
# hydrophob=['V', 'F', 'M', 'L', 'I', 'C', 'A', 'G', 'Y']
# ['V', 'F', 'M', 'L', 'I', 'C', 'T', 'W', 'H', 'K', 'A', 'G', 'Y']
return aa in hydrophob
# AssertionError: Your code misclassified an amino acid that is hydrophobic.
# assert False + where False = all([True, True, True, False, True, True, ...])
# added G but still the same error
def isAromatic(aa):
aromatic=['H', 'F', 'W', 'Y']
return aa in aromatic
def isPolar(aa):
polar=['R', 'N', 'D', 'Q', 'E', 'H', 'K', 'S', 'T', 'Y']
return aa in polar
def isProline(aa):
return aa == 'P'
def containsSulfur(aa):
sulfur=['C','M']
return aa in sulfur
def isAcid(aa):
acid=['D', 'E']
return aa in acid
def isBasic(aa):
basic=['R', 'H', 'K']
return aa in basic
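# Quick sanity checks against the lists above: isCharged('R') is True via the
# positive set, isAcid('E') is True, and isAromatic('P') is False.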
<file_sep>import numpy as np
import re
from collections import Counter as counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
# Check for sequence validity
alphapet_re = "|".join(list(ALPHABET))
pattern = '('+alphapet_re+')*'
# Raise TypeError if MSA contains no sequences, not all sequences are the same length, or some sequences contain characters other than amino acids or gap characters
if not len(set(map(len, sequences))) == 1 or not all([bool(re.fullmatch(pattern, seq)) for seq in sequences]): # https://stackoverflow.com/questions/35791051/better-way-to-check-if-all-lists-in-a-list-are-the-same-length
raise TypeError
# Count aa occurences. length x AAs : len(sequences[0]) x 20
# counts = np.zeros((, 20))
self.sequences = np.array([list(s) for s in sequences])
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# pssm = np.zeros((20, 20))
# Count (potentialy with weights) the observed amino acids and gaps
counts = np.zeros((self.get_size()[1], 21))
all_weights = self.get_sequence_weights()
for i in range(self.get_size()[1]):
column = self.sequences[:, i] # equivalent to np.take(self.sequences, i, axis=1)
AAs_at_pos = counter(column)
for k in AAs_at_pos.keys():
seq_ids = np.where(k==column)[0] # sequences that contain aa k at position i.
if use_sequence_weights:
weights = np.take(all_weights, seq_ids)
counts[i, AA_TO_INT[k]] = np.sum(weights)
else:
counts[i, AA_TO_INT[k]] = AAs_at_pos[k]
# np.where(np.isin(column, AAs_at_pos.keys())) # Attempting to avoid looping over keys
        if bg_matrix is None:  # If no background frequencies are provided, use uniform background frequencies (i.e. 1/20 = 0.05)
bg_freqs = np.repeat(np.array(0.05, dtype=np.float64), 20)
bg_matrix = np.tile(np.divide(bg_freqs, 20), (20, 1))
else:
bg_freqs = np.sum(bg_matrix, axis=0)
# Redistribute gaps according to background frequencies
if redistribute_gaps:
gap_counts = np.take(counts, -1, axis=1).reshape(self.get_size()[1], 1)
counts[:, -1] = gap_counts.reshape(self.get_size()[1])
gap_counts_dup = np.repeat(gap_counts, 20, axis=1)
adjusted_gap_count = np.multiply(gap_counts_dup, bg_freqs) # Or adjusted_gap_count = gap_counts_dup*bg_freqs
counts[:, :20] = counts[:, :20] + adjusted_gap_count
if add_pseudocounts:
alpha = self.get_number_of_observations()-1
freq_div_bg = np.divide(counts[:, :20], bg_freqs) # Fij_div_Pj
pseudocounts = np.dot(freq_div_bg, bg_matrix) # Need to consider case where bg_matrix is not provided
weighted_counts = np.multiply(alpha, counts[:, :20])
weighted_pseudocounts = np.multiply(beta, pseudocounts)
adjusted_freqs = np.divide(np.add(weighted_counts, weighted_pseudocounts), alpha+beta)
row_sums = np.sum(adjusted_freqs, axis=1).reshape(self.get_size()[1],1)
normalized = np.divide(adjusted_freqs, row_sums)
else:
row_sums = np.sum(counts[:, :20], axis=1).reshape(self.get_size()[1],1)
normalized = np.divide(counts[:, :20], row_sums)
pssm = np.divide(normalized, bg_freqs)
pssm = 2*np.log2(pssm)
pssm = np.where(np.isneginf(pssm), -20, pssm) # one could also make use of the 'out' parameter.
seq_ids = np.where('-'!=self.sequences[0])[0]
pssm = pssm[seq_ids, :]
return np.rint(pssm).astype(np.int64)
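    # Worked example for the scoring above: with uniform background
    # frequencies (p = 0.05) and a column where an amino acid has relative
    # frequency 0.25, the entry is 2 * log2(0.25 / 0.05) = 2 * log2(5),
    # roughly 4.64, which rounds to 5.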
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
# return self.sequences[0].replace('-', '')
return "".join(self.sequences[0]).replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
# weights = np.zeros(self.get_size()[0], dtype=np.float64)
self.r_vals = np.zeros(self.get_size()[1], dtype=np.int64)
cell_weight = np.zeros_like(self.sequences, dtype=np.float64)
for i in range(self.get_size()[1]):
column = self.sequences[:, i] # equivalent to np.take(self.sequences, i, axis=1)
counts = counter(column)
# inefficient way
""" s_vals = np.array([counts[val] for val in column])
self.r_vals[i] = len(counts)
for j in range(len(column)):
cell_weight[j][i] = 1/(self.r_vals[i]*counts[self.sequences[j][i]]) """
# more efficient way
s_vals = np.array(list(map(lambda key: counts[key], column)), dtype=np.int64)
self.r_vals[i] = len(counts)
if self.r_vals[i]>1:
# corresponds to S_i,k
cell_weight[:, i] = np.divide(1, np.multiply(self.r_vals[i], s_vals)) # Or cell_weight[:, i] = 1/(self.r_vals[i]*s_vals[:])
weights = cell_weight.sum(1)
# weights = [sum(x) for x in cell_weight]
return weights.astype(np.float64)
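    # Worked example for the weighting above: a column holding [A, A, C] has
    # r = 2 distinct residues with s(A) = 2 and s(C) = 1, so it contributes
    # 1/(2*2) = 0.25 to each A-sequence and 1/(2*1) = 0.5 to the C-sequence.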
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = (1/len(self.sequences[0]))*sum(self.r_vals)
return num_obs.astype(np.float64)
<file_sep>import numpy as np
from contextlib import suppress
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros(
            (len(string2) + 1, len(string1) + 1), dtype=int)
        self.pred_matrix = np.zeros(
            (len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for r in range(len(self.string2) + 1):
for c in range(len(self.string1) + 1):
s1 = s2 = s3 = -999
# diag
with suppress(Exception):
if self.substitution_matrix is None:
d = 1 if self.string1[c -
1] == self.string2[r - 1] else -1
else:
d = self.substitution_matrix[
self.string1[c - 1]
][
self.string2[r - 1]
]
assert r - 1 >= 0
assert c - 1 >= 0
s1 = d + self.score_matrix[r - 1][c - 1]
# top
with suppress(Exception):
d = self.gap_penalty
assert r - 1 >= 0
s2 = d + self.score_matrix[r - 1][c]
# right
with suppress(Exception):
d = self.gap_penalty
assert c - 1 >= 0
s3 = d + self.score_matrix[r][c - 1]
s = max(s1, s2, s3)
self.score_matrix[r][c] = s if s > 0 else 0
self.pred_matrix[r][c] += 1 if s == s1 else 0
self.pred_matrix[r][c] += 2 if s == s2 else 0
self.pred_matrix[r][c] += 4 if s == s3 else 0
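    # The predecessor matrix packs the traceback directions into a bitmask:
    # bit 1 = diagonal, bit 2 = top, bit 4 = left. A cell value of 5 (1 | 4)
    # therefore records a tie between the diagonal and left predecessors.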
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.score_matrix.max() > 0
def rec_alignments(self, r, c, prev1="", prev2=""):
s = []
if r + c == 0 or self.score_matrix[r][c] == 0:
return [(prev1, prev2)]
d = self.pred_matrix[r][c]
if r == 0:
d = 4
if c == 0:
d = 2
if d & 1: # diag
c1 = self.string1[c - 1]
c2 = self.string2[r - 1]
next1 = c1 + prev1
next2 = c2 + prev2
for i in self.rec_alignments(r-1, c-1, next1, next2):
s.append(i)
if d & 2: # top
c1 = '-'
c2 = self.string2[r - 1]
next1 = c1 + prev1
next2 = c2 + prev2
for i in self.rec_alignments(r-1, c, next1, next2):
s.append(i)
if d & 4: # left
c1 = self.string1[c - 1]
c2 = '-'
next1 = c1 + prev1
next2 = c2 + prev2
for i in self.rec_alignments(r, c-1, next1, next2):
s.append(i)
return s
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
m = self.score_matrix.max()
for r in range(len(self.string2) + 1):
for c in range(len(self.string1) + 1):
if self.score_matrix[r][c] == m:
return self.rec_alignments(r, c)[0]
return None
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
m = self.score_matrix.max()
with suppress(Exception):
for r in range(len(self.string2) + 1):
for c in range(len(self.string1) + 1):
if self.score_matrix[r][c] == m:
raise Exception()
alignment = self.get_alignment()
if alignment is None:
return False
a = alignment[string_number-1]
a = a.replace('-', '')
end = [c, r][string_number-1]
start = end - len(a)
return start <= residue_index < end
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
self.counts = len(self.file)
return self.counts
def get_average_length(self):
overalllength = 0
for elements in self.file:
x, y = elements
overalllength += len(y)
return overalllength/len(self.file)
def read_fasta(self, path):
header = ""
body = ""
listine = []
with open(path,"r") as file:
for line in file:
if line[0] == ">" or line[0] == ";":
if body != "":
listine.append((header,body))
body = ""
header = line.replace('\n','')
else:
header += line.replace('\n','')
else:
body += line.replace('\n','').replace('*','')
listine.append((header,body))
self.file = listine
def get_abs_frequencies(self):
all = ''
for elements in self.file:
x, y = elements
all += y
self.absfreq = Counter(all)
return self.absfreq
def get_av_frequencies(self):
absfreqer = self.get_abs_frequencies()
        count = sum(absfreqer.values())
        for element in absfreqer.keys():
            absfreqer[element] = absfreqer[element] / count
        return absfreqer
<file_sep>##############
# Exercise 2.7
##############
import enum
class AAProp(enum.Enum):
negative_charge = 1
positive_charge = 2
hydrophobic = 3
aromatic = 4
nonpolar = 5
proline = 6
acid = 7
basic = 8
hasSulfur = 9
aa_properties_dict ={
AAProp.negative_charge: ['D', 'E'],
AAProp.positive_charge: ['R', 'H', 'K'],
AAProp.hydrophobic: ['A', 'F', 'I', 'L', 'M', 'V', 'W', 'Y'],#AAProp.hydrophobic: ['A', 'C', 'F', 'G', 'I', 'L', 'M', 'P', 'V', 'W'],
AAProp.aromatic: ['F', 'H', 'W', 'Y'],
AAProp.nonpolar: ['V', 'W', 'P', 'F', 'M', 'L', 'I', 'G', 'C', 'A'],
AAProp.proline: ['P'],
AAProp.acid: ['D', 'E'],#AAProp.acid: ['C', 'D', 'E', 'Y'],
AAProp.basic: ['H', 'K', 'R'],
AAProp.hasSulfur: ['C', 'M'],
}
def isInList(elemList, elem):
isInList = False
if elemList.count(elem) != 0:
isInList = True
return isInList
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return isInList(aa_properties_dict[AAProp.positive_charge], aa.upper())
def isNegativelyCharged(aa):
return isInList(aa_properties_dict[AAProp.negative_charge], aa.upper())
def isHydrophobic(aa):
return isInList(aa_properties_dict[AAProp.hydrophobic], aa.upper())
def isAromatic(aa):
return isInList(aa_properties_dict[AAProp.aromatic], aa.upper())
def isPolar(aa):
return not isInList(aa_properties_dict[AAProp.nonpolar], aa.upper())
def isProline(aa):
return isInList(aa_properties_dict[AAProp.proline], aa.upper())
def containsSulfur(aa):
return isInList(aa_properties_dict[AAProp.hasSulfur], aa.upper())
def isAcid(aa):
return isInList(aa_properties_dict[AAProp.acid], aa.upper())
def isBasic(aa):
return isInList(aa_properties_dict[AAProp.basic], aa.upper())
# def main():
# print ("isPositivelyCharged? " + str(isPositivelyCharged('r')))
# print ("isNegativelyCharged? " + str(isNegativelyCharged('r')))
# print ("isCharged? " + str(isCharged('r')))
# print ("isHydrophobic? " + str(isHydrophobic('c')))
# print ("isAromatic? " + str((isAromatic('m'))))
# print ("isPolar? " + str(isPolar('c')))
# print ("isProline? " + str(isProline('p')))
# print ("isAcid? " + str(isAcid('y')))
# print ("isBasic? " + str(isBasic('c')))
# print ("containsSulfur? " + str(containsSulfur('a')))
# main()
<file_sep>##############
# Exercise 2.7
##############
hydro = ['A','I','L','M','V','F','W','Y']
aroma = ['F','W','Y','H']
Polar = ['N','Y','Q','S','T','R','H','K','D','E']
proline = ['P']
sulphur = ['M','C']
acid = ['D','E']
base = ['R','H','K']
charged = ['D','E','R','H','K']
positive = ['R','H','K']
negative = ['D','E']
def isCharged(aa):
    return aa in charged
def isPositivelyCharged(aa):
    return aa in positive
def isNegativelyCharged(aa):
    return aa in negative
def isHydrophobic(aa):
    return aa in hydro
def isAromatic(aa):
    return aa in aroma
def isPolar(aa):
    return aa in Polar
def isProline(aa):
    return aa in proline
def containsSulfur(aa):
    return aa in sulphur
def isAcid(aa):
    return aa in acid
def isBasic(aa):
    return aa in base
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
        self.aligment_matrix = np.zeros((len(string2) + 1, len(string1) + 1, 3), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.aligment_matrix[0, 1:, 1] = np.full(len(self.string1), 2)
self.aligment_matrix[1:, 0, 2] = np.full(len(self.string2), 3)
self.score_matrix[0, 1:] = np.zeros(shape=len(self.string1))
self.score_matrix[1:, 0] = np.zeros(shape=len(self.string2))
for f in range(1, len(self.string1) + 1):
for s in range(1, len(self.string2) + 1):
score_list = [(self.score_matrix[s - 1][f - 1] + self.substituion_matrix.get(self.string1[f - 1])[
self.string2[s - 1]], 1),
(self.score_matrix[s - 1][f] + self.gap_penalty, 2),
(self.score_matrix[s][f - 1] + self.gap_penalty, 3)]
score_matrix_score, max_value = max(score_list, key=lambda x: x[0])
self.score_matrix[s, f] = score_matrix_score
t = [score_list[i][1] for i in range(len(score_list)) if score_list[i][0] == score_matrix_score]
for item in t:
self.aligment_matrix[s, f, item - 1] = item
        self.score_matrix[self.score_matrix < 0] = 0
return self.score_matrix
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return True if self.score_matrix.max() > 0 else False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
def find_ali(s, f, path):
newPath = False
if self.score_matrix[s,f] == 0:
return path
if s == 0 or f == 0:
return path
if self.aligment_matrix[s, f, 0] == 1:
newPath = True
path.append((self.string2[s - 1], self.string1[f - 1]))
find_ali(s - 1, f - 1, path)
if self.aligment_matrix[s, f, 1] == 2:
if newPath:
path.append(("//","//"))
newPath = True
path.append((self.string2[s - 1], "-"))
find_ali(s - 1, f, path)
if self.aligment_matrix[s, f, 2] == 3:
if newPath:
path.append(("//", "//"))
path.append(("-", self.string1[f - 1]))
find_ali(s, f-1, path)
alligent_list = []
a,b = np.unravel_index(np.argmax(self.score_matrix), self.score_matrix.shape)
find_ali(a,b,alligent_list)
word1 = ""
word2 = ""
split_list = []
for s1,s2 in alligent_list:
if s1 == "//":
split_list.append((word2,word1))
word1 = ""
word2 = ""
else:
word1 += s1
word2 += s2
split_list.append((word2,word1))
for index, word in enumerate(split_list[1:]):
previous_word_s1 = split_list[index][0]
previous_word_s2 = split_list[index][1]
split_list[index+1] = (previous_word_s1[0:len(previous_word_s1)-len(word[0])]+word[0], previous_word_s2[0:len(previous_word_s2)-len(word[1])]+word[1])
final_list = list(map(lambda x: (x[0][::-1],x[1][::-1]), split_list))
return final_list[0]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
t = self.get_alignment()
b, a = np.unravel_index(np.argmax(self.score_matrix), self.score_matrix.shape)
if string_number == 1:
if a-len(t[0]) < residue_index < a:
return True
else:
return False
else:
if b-len(t[1]) < residue_index < b:
return True
else:
return False
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
pos_charged = ['R', 'K', 'H']
if aa in pos_charged:
return True
else:
return False
def isNegativelyCharged(aa):
neg_charged = ['D', 'E']
if aa in neg_charged:
return True
else:
return False
def isHydrophobic(aa):
hydrophobe = ['A', 'I', 'L', 'M', 'F', 'V', 'Y', 'W']
if aa in hydrophobe:
return True
else:
return False
def isAromatic(aa):
aromatic = ['H', 'F', 'W', 'Y']
if aa in aromatic:
return True
else:
return False
def isPolar(aa):
polar =['R', 'N','D','Q', 'E', 'H', 'K', 'S', 'T', 'Y']
if aa in polar:
return True
else:
return False
def isProline(aa):
if aa == 'P':
return True
else:
return False
def containsSulfur(aa):
sulphur = ['C', 'M']
if aa in sulphur:
return True
else:
return False
def isAcid(aa):
acid = ['D', 'E']
if aa in acid:
return True
else:
return False
def isBasic(aa):
basic = ['R', 'H', 'K']
if aa in basic:
return True
else:
return False
<file_sep>##############
# Exercise 2.6
##############
import string
a = string.ascii_uppercase
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
bla = 0
for gurke in self.__sequences:
bla+= len(gurke)
return bla/len(self.__sequences)
def read_fasta(self, filepath):
with open(filepath, "r") as f:
seq = ""
sequence_started = False
for line in f:
if sequence_started:
seq += line.strip().replace("*","")
if line in ['\n', '\r\n']:
self.__sequences.append(seq)
seq = ""
sequence_started = False
if line.startswith(">") or line.startswith(";"):
sequence_started = True
continue
self.__sequences.append(seq)
print(len(self.__sequences))
def get_abs_frequencies(self):
ret = {}
for b in a:
ret[b] = ''.join(self.__sequences).count(b)
return ret
def get_av_frequencies(self):
ret = {}
for b in a:
ret[b] = ''.join(self.__sequences).count(b)/len(''.join(self.__sequences))
return ret
if __name__ == "__main__":
    bla = AADist("tests/tests.fasta")
<file_sep>##############
# Exercise 1.6
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
if aa in isPositivelyCharged_list:
return True
else:
return False
def isNegativelyCharged(aa):
if aa in isNegativelyCharged_list:
return True
else:
return False
def isHydrophobic(aa):
if aa in isHydrophobic_list:
return True
else:
return False
def isAromatic(aa):
if aa in isAromatic_list:
return True
else:
return False
def isPolar(aa):
if aa in isPolar_list:
return True
else:
return False
def isProline(aa):
if aa=='P':
return True
else:
return False
def containsSulfur(aa):
if aa in containsSulfur_list:
return True
else:
return False
def isAcid(aa):
if aa in isAcid_list:
return True
else:
return False
def isBasic(aa):
if aa in isBasic_list:
return True
else:
return False
###AA Properties here, saved as lists###
isPositivelyCharged_list = ['H','K','R']
isNegativelyCharged_list = ['D','E']
isHydrophobic_list = ['A','F','I','L','M','V','W','Y']
isAromatic_list = ['F','H','W','Y']
isPolar_list =['D','E','H','K','N','Q','R','S','T','Y']
containsSulfur_list = ['C','M']
isAcid_list = ['D','E']
isBasic_list = ['H','K','R']
<file_sep>import numpy as np
import itertools
import operator
from pathlib import Path
import re
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.structure = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.structure.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return list(filter(lambda s: word in s, self.structure))
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
s = set()
p = 0
for sequence in self.structure:
sub = set()
for i in range(0, len(sequence)):
if i + 3 > len(sequence):
break
word = sequence[i: i + 3]
s.add(word)
sub.add(word)
p = p + len(sub)
u = len(self.structure)
o = len(s)
p = p / u
a = 0
for w in s:
for sequence in self.structure:
if w in sequence:
a += 1
a = a / o
return u, o, round(p), round(a)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.words = []
for a in ALPHABET:
for b in ALPHABET:
for c in ALPHABET:
self.words.append(a + b + c)
self.substitution_matrix = substitution_matrix
# self.words = list(map(lambda s: ''.join(s), list(itertools.combinations(ALPHABET, 3))))
def get_score(self, a, b):
score = 0
for i in range(0, 3):
score += self.substitution_matrix[AA_TO_INT[a[i]]][AA_TO_INT[b[i]]]
return score
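    # E.g. with hypothetical scores sub[A][A] = 4 and sub[W][W] = 11,
    # get_score("AWA", "AWA") would return 4 + 11 + 4 = 19.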
def get_score_pssm(self, word, query, pssm):
score = 0
for i in range(0, 3):
x = query[i]
y = AA_TO_INT[word[i]]
score += pssm[x][y]
return score
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
s = {}
if sequence is not None:
for word in self.words:
for i in range(0, len(sequence) + 1):
if i + 3 > len(sequence):
break
query = sequence[i:i + 3]
score = self.get_score(query, word)
if score >= T:
if word in s:
current = s[word]
if current < score:
s[word] = score
else:
s[word] = score
else:
for word in self.words:
for i in range(0, len(pssm) + 1):
if i + 3 > len(pssm):
break
query = range(i, i + 3)
score = self.get_score_pssm(word, query, pssm)
if score >= T:
if word in s:
current = s[word]
if current < score:
s[word] = score
else:
s[word] = score
result = sorted(s.items(), key=operator.itemgetter(1), reverse=True)
result = list(map(lambda w: w[0], result))
return result
def traverse(self, query, target, i, j, X, side, start):
k = 0
top_score = start
score = start
HSP = ''
best = ''
while True:
q_i = i + k
t_j = j + k
if q_i < 0 or q_i >= len(query) or t_j < 0 or t_j >= len(target):
break
q = query[q_i]
t = target[t_j]
HSP += t
score += self.substitution_matrix[AA_TO_INT[t]][AA_TO_INT[q]]
if X <= top_score - score:
break
if score > top_score:
top_score = score
best = HSP
k += side
return top_score, best
def traverse_pssm(self, pssm, target, i, j, X, side, start):
k = 0
top_score = start
score = start
HSP = ''
best = ''
while True:
q_i = i + k
t_j = j + k
if q_i < 0 or q_i >= len(pssm) or t_j < 0 or t_j >= len(target):
break
t = target[t_j]
HSP += t
score += pssm[q_i][AA_TO_INT[t]]
if X <= top_score - score:
break
if score > top_score:
top_score = score
best = HSP
k += side
return top_score, best
def find(self, query, *patterns):
index = []
for pattern in patterns:
for i in range(0, len(query) - 2):
if query[i:i+3] == pattern:
index.append(i)
return index
    def inside(self, tuples, tup):
        for i in tuples:
            if i == tup:
                return True
        return False
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
RIGHT = 1
LEFT = -1
result = {}
if query is not None:
for word in self.get_words(sequence=query, T=T):
for i in range(0, len(query) - 2):
if self.get_score(query[i:i+3], word) >= T:
for target in blast_db.get_sequences(word):
for j in self.find(target, word):
start = self.get_score(query[i:i+3], word)
(right, hsp_r) = self.traverse(query, target, i + 3, j + 3, X, RIGHT, start)
(score, hsp_l) = self.traverse(query, target, i - 1, j - 1, X, LEFT, right)
hsp = hsp_l[::-1] + hsp_r
tup = (i - len(hsp_l), j - len(hsp_l), len(hsp) + 3, score)
if score >= S:
if target in result:
if not self.inside(result[target], tup):
result[target].append(tup)
else:
result[target] = [tup]
else:
for word in self.get_words(sequence=None, pssm=pssm, T=T):
for i in range(0, len(pssm) - 2):
if self.get_score_pssm(word, range(i, i + 3), pssm) >= T:
for target in blast_db.get_sequences(word):
for j in self.find(target, word):
start = self.get_score_pssm(word, range(i, i + 3), pssm)
(right, hsp_r) = self.traverse_pssm(pssm, target, i + 3, j + 3, X, RIGHT, start)
(score, hsp_l) = self.traverse_pssm(pssm, target, i - 1, j - 1, X, LEFT, right)
hsp = hsp_l[::-1] + hsp_r
tup = (i - len(hsp_l), j - len(hsp_l), len(hsp) + 3, score)
if score >= S:
if target in result:
if not self.inside(result[target], tup):
result[target].append(tup)
else:
result[target] = [tup]
return result
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        # the two-hit strategy is not implemented; return the placeholder result
        return d
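# A minimal usage sketch of the one-hit search with a toy substitution matrix
# (identity-style scores, an assumption standing in for BLOSUM62; thresholds
# are chosen only to exercise the code paths above):
if __name__ == '__main__':
    toy_matrix = np.full((20, 20), -2, dtype=np.int64)
    np.fill_diagonal(toy_matrix, 5)
    blast = Blast(toy_matrix)
    db = BlastDb()
    db.add_sequence('MGPRARPAFLMGPRAR')
    # with T=15 only exact 3-mers of the query reach the threshold (3 * 5 = 15)
    print(blast.get_words(sequence='MGPRAR', T=15))
    print(blast.search_one_hit(db, query='MGPRAR', T=15, X=5, S=15))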
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed from NumPy
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix = self.get_score_matrix()
self.alignments = self.get_alignments()
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
res=-1
for align in self.alignments:
rev1 = align[0][::-1]
rev2 = align[1][::-1]
score=0
for i in range(len(rev1)):
if rev1[i]=='-' or rev2[i]=='-':
score+=self.gap_penalty
else:
                    score += self.substitution_matrix[rev1[i]][rev2[i]]
if(score>res):
res=score
return res
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
graph = self.build_graph()
tracks = self.find_all_paths(graph, (self.score_matrix.shape[0]-1,self.score_matrix.shape[1]-1),(0, 0))
seq1 = self.string2
seq2 = self.string1
alignments = []
for i in tracks:
bs1=""
bs2 =""
            for j in range(len(i)):
                if i[j][0] > 0 and i[j][1] > 0:
                    if i[j][0] == i[j+1][0]:
                        bs1 = '-' + bs1
                        bs2 = seq2[i[j][1]-1] + bs2
                    elif i[j][1] == i[j+1][1]:
                        bs2 = '-' + bs2
                        bs1 = seq1[i[j][0]-1] + bs1
                    else:
                        bs1 = seq1[i[j][0]-1] + bs1
                        bs2 = seq2[i[j][1]-1] + bs2
                elif i[j][0] > 0:
                    # border column: a string2 residue aligned to a leading gap
                    bs1 = seq1[i[j][0]-1] + bs1
                    bs2 = '-' + bs2
                elif i[j][1] > 0:
                    # border row: a string1 residue aligned to a leading gap
                    bs1 = '-' + bs1
                    bs2 = seq2[i[j][1]-1] + bs2
alignments.append((bs2,bs1))
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
mat = self.score_matrix
pen = self.gap_penalty
seq1 = self.string2
seq2 = self.string1
for i in range(mat.shape[0]):
mat[i][0] = pen*i
for j in range(mat.shape[1]):
mat[0][j] = pen*j
for i in range(1,mat.shape[0]):
for j in range(1,mat.shape[1]):
d=mat[i-1][j]+pen
ii=mat[i][j-1]+pen
if (seq1[i-1] == '-' or seq2[j-1] == '-'):
m=pen
else:
                    m = self.substitution_matrix[seq1[i-1]][seq2[j-1]]
m+=mat[i-1][j-1]
mat[i][j]=max(d,ii,m)
return mat
def build_graph(self):
seq1=self.string2
seq2=self.string1
graph={}
for i in range(1,self.score_matrix.shape[0])[::-1]:
graph[(i,0)] = [(i-1,0)]
for j in range(1,self.score_matrix.shape[1])[::-1]:
graph[(0,j)]=[(0,j-1)]
graph[(i,j)]=[]
score_up = self.score_matrix[i][j-1]
score_diag = self.score_matrix[i-1][j - 1]
score_left = self.score_matrix[i - 1][j]
score = self.score_matrix[i][j]
pen=self.gap_penalty
                if score_diag + self.substitution_matrix[seq1[i - 1]][seq2[j - 1]] == score:
graph[(i, j)]=graph[(i, j)]+[(i-1,j-1)]
if score==score_left+pen:
graph[(i, j)]=graph[(i, j)]+[(i - 1, j)]
if score==score_up+pen:
graph[(i, j)]= graph[(i, j)]+[(i, j - 1)]
return graph
    def find_all_paths(self, graph, beg, end, path=None):
        # a mutable default argument ([]) would be shared across calls
        if path is None:
            path = []
        path = path + [beg]
if beg == end:
return [path]
if beg not in graph:
return []
paths = []
for node in graph[beg]:
if node not in path:
np = self.find_all_paths(graph, node, end, path)
for k in np:
paths.append(k)
return paths
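# A minimal usage sketch (illustrative only): a hand-rolled identity scoring
# dict over a DNA alphabet stands in for a real substitution matrix.
if __name__ == '__main__':
    _alphabet = 'ACGT'
    _identity = {a: {b: (1 if a == b else -1) for b in _alphabet} for a in _alphabet}
    ga = GlobalAlignment('ACGT', 'ACT', -2, _identity)
    print(ga.get_best_score())
    print(ga.get_alignments())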
<file_sep>import numpy as np
import math
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
        sequences for correctness. Pre-calculate any statistics you deem fit.
:param sequences: List containing the MSA sequences.
"""
        if len(sequences) < 3:
            raise TypeError("empty or too small MSA")
        self.length = len(sequences[0])
        for i in sequences:
            if len(i) != self.length or not set(i) <= set(ALPHABET):
                raise TypeError("sequences have different lengths or invalid characters")
        self.msalength = len(sequences)
        self.primseqgap = sequences[0]
        self.primaryseq = sequences[0].replace('-', '')
        self.sequences = sequences
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
        pssm = np.zeros((len(self.sequences[0]), len(ALPHABET)))
# calculate weights
        if use_sequence_weights:
            # sequence weights must exist before they can be applied
            if not hasattr(self, 'weights'):
                self.get_sequence_weights()
            for i in range(0, len(pssm)):
                for j in range(0, len(pssm[i])):
                    for x in range(0, len(self.sequences)):
                        if ALPHABET[j] == self.sequences[x][i]:
                            pssm[i][j] += self.weights[x]
        else:
            for i in range(0, len(pssm)):
                occurencies = self.count_occurencies(i)
                count = occurencies.copy()
                if '-' in count:
                    del count['-']  # gaps do not count towards the column total
                total = len(list(count.elements()))
                for j in range(0, len(pssm[i])):
                    pssm[i][j] = occurencies[ALPHABET[j]] / total
        if redistribute_gaps:
            for i in range(0, len(pssm)):
                gap_count = pssm[i][len(ALPHABET) - 1]
                if gap_count != 0:
                    # the gap column is redistributed over the 20 amino acids
                    for j in range(0, len(pssm[i]) - 1):
                        if bg_matrix is None:
                            pssm[i][j] += gap_count * 0.05
                        else:
                            pssm[i][j] += gap_count * bg_matrix[AA_TO_INT[ALPHABET[j]]][AA_TO_INT[ALPHABET[j]]]
pseudo = np.zeros((len(self.sequences[0]),len(ALPHABET)))
#self.get_sequence_weights()
alpha=self.get_number_of_observations()-1
        if add_pseudocounts:
            for i in range(0, len(pseudo)):
                occurencies = self.count_occurencies(i)
                # the gap column has no background frequency and is skipped
                for j in range(0, len(pseudo[i]) - 1):
                    if bg_matrix is None:
                        pseudo[i][j] = occurencies[ALPHABET[j]] / 0.05
                    else:
                        pseudo[i][j] = occurencies[ALPHABET[j]] / bg_matrix[AA_TO_INT[ALPHABET[j]]][AA_TO_INT[ALPHABET[j]]]
                    pssm[i][j] += pseudo[i][j]
        # optional pseudocount frequency adjustment (disabled)
        adjustfr = False
        if adjustfr:
            for i in range(0, len(pssm)):
                for j in range(0, len(pssm[i]) - 1):
                    if bg_matrix is None:
                        pseudo[i][j] = 0.05
                    pssm[i][j] = (alpha * pssm[i][j] + beta * pseudo[i][j]) / (alpha + beta)
        # divide by the background frequency
        for i in range(0, len(pssm)):
            for j in range(0, len(pssm[i]) - 1):
                if bg_matrix is None:
                    pssm[i][j] /= 0.05
                else:
                    pssm[i][j] /= bg_matrix[AA_TO_INT[ALPHABET[j]]][AA_TO_INT[ALPHABET[j]]]
        # 2 * log2 score; zero frequencies become -20
        for i in range(0, len(pssm)):
            for j in range(0, len(pssm[i]) - 1):
                value = pssm[i][j]
                if value > 0:
                    pssm[i][j] = 2 * math.log(value, 2)
                else:
                    pssm[i][j] = -20
        # remove the gap column
        pssm = np.delete(pssm, len(pssm[0]) - 1, 1)
#remove rows corresponding to gaps
prim_gaps = self.primseqgap
while(prim_gaps.find('-')>-1):
curr_gap = prim_gaps.find('-')
pssm=np.delete(pssm, curr_gap, 0)
prim_gaps=prim_gaps[:curr_gap]+prim_gaps[curr_gap+1:]
        return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.msalength, self.length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.primaryseq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
                 all sequences in the MSA.
"""
weights = np.zeros(len(self.sequences))
detailed_weights = np.zeros((len(self.sequences),len(self.sequences[0])))
self.occurency_table = np.zeros(len(self.sequences[0]))
for i in range(0, len(self.sequences)):
for j in range(0, len(self.sequences[i])):
occurencies = self.count_occurencies(j)
self.occurency_table[j]= len(occurencies)
if len(occurencies)==1:
detailed_weights [i] [j]=0
else:
detailed_weights [i] [j] = 1/(occurencies[self.sequences[i][j]]*len(occurencies))
for k in range(0, len(detailed_weights)):
for l in range(0, len(detailed_weights[0])):
weights[k]+=detailed_weights[k][l]
self.detailed_weights =detailed_weights
self.weights=weights
return weights.astype(np.float64)
def count_occurencies(self, position):
occurencies=Counter()
for i in self.sequences:
occurencies+=Counter(i[position])
return occurencies
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        # the occurrence table is filled by get_sequence_weights()
        if not hasattr(self, 'occurency_table'):
            self.get_sequence_weights()
        sum_r = 0
        for i in self.occurency_table:
            sum_r += i
        num_obs = 1 / self.length * sum_r
        return np.float64(num_obs)
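# Standalone sketch of the position-based (Henikoff) weights computed by
# get_sequence_weights() above: sequence k gains 1 / (r * s) per column, where
# r is the number of distinct characters in the column and s is how often the
# character of sequence k occurs there; fully conserved columns are skipped.
# The toy MSA below is an illustrative assumption.
def _henikoff_weights_sketch(msa_rows):
    weights = [0.0] * len(msa_rows)
    for i in range(len(msa_rows[0])):
        column = [row[i] for row in msa_rows]
        r = len(set(column))
        if r == 1:
            continue  # conserved columns contribute nothing
        for k, row in enumerate(msa_rows):
            weights[k] += 1.0 / (r * column.count(row[i]))
    return weights
if __name__ == '__main__':
    print(_henikoff_weights_sketch(['SEQAN', 'SEQVN', 'SE-AN']))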
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
        sequences for correctness. Pre-calculate any statistics you deem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError("The MSA contains 0 valid sequences")
for i in range(1, len(sequences)):
if len(sequences[i]) != len(sequences[i-1]):
raise TypeError("The sequences have different lengths")
for seq in sequences:
for elem in seq:
if elem not in ALPHABET:
raise TypeError("A sequence contains an invalid element")
self.sequences = sequences
# count observed amino acids
self.pssm = np.zeros((len(self.get_primary_sequence()), 21))
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
self.pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
    def normalize_divide_log_real(self, pssm, bg_matrix):
        # background frequency of each amino acid = row sum of pair frequencies
        bg_vector = np.zeros(20)
        for i in range(0, 20):
            total = 0
            for j in range(0, 20):
                total = total + bg_matrix[i][j]
            bg_vector[i] = total
        for row in range(0, len(self.get_primary_sequence())):
            row_total = np.sum(pssm[row])
            for i in range(0, 20):
                pssm[row, i] = float(pssm[row, i] / row_total)
                pssm[row, i] = float(pssm[row, i] / bg_vector[i])
                if pssm[row, i] != 0:
                    pssm[row, i] = float(2 * np.log2(pssm[row, i]))
                else:
                    pssm[row, i] = -20
        return pssm
    def normalize_divide_log(self, pssm):
        for row in range(0, len(self.get_primary_sequence())):
            row_total = np.sum(pssm[row])
            for i in range(0, 20):
                pssm[row, i] = float(pssm[row, i] / row_total)
                pssm[row, i] = float(pssm[row, i] / 0.05)
                if pssm[row, i] != 0:
                    pssm[row, i] = float(2 * np.log2(pssm[row, i]))
                else:
                    pssm[row, i] = -20
        return pssm
def count_weights(self, pssm, weights):
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for j in range(0, self.get_size()[0]):
if self.sequences[j][i] != '-':
pssm[index][AA_TO_INT[self.sequences[j][i]]] += 1 * weights[j]
index += 1
return pssm
def redistribute(self, pssm, bg_matrix):
bg_vector = np.zeros(20)
for i in range(0, 20):
sum = 0
for j in range(0, 20):
sum = sum + bg_matrix[i][j]
bg_vector[i] = sum
for row in range(0, len(self.get_primary_sequence())):
gap = self.pssm[row][-1]
for col in range(0, 20):
pssm[row][col] = float(pssm[row][col] + (gap * bg_vector[col]))
return pssm
def redistribute_no_bg(self, pssm):
for row in range(0, len(self.get_primary_sequence())):
gap = self.pssm[row][-1]
for col in range(0, 20):
pssm[row][col] = float(pssm[row][col] + (gap * 0.05))
return pssm
    def calculate_pseudocounts(self, pssm, bg_matrix):
        bg_vector = np.zeros(20)
        for i in range(0, 20):
            total = 0
            for j in range(0, 20):
                total = total + bg_matrix[i][j]
            bg_vector[i] = total
        # pseudocount g_ia = sum_j (f_ij / P_j) * q_ja; the matrix must be
        # indexed with integers, not one-letter codes
        pseudo = np.zeros_like(pssm)
        for row in range(0, len(self.get_primary_sequence())):
            for col in range(0, 20):
                for j in range(0, 20):
                    pseudo[row, col] += pssm[row, j] / bg_vector[j] * bg_matrix[j][col]
        return pssm + pseudo
    def adjust_frequencies(self, pssm, beta):
        # calculate independent observations (mean number of distinct
        # characters per column; the inner loop runs over the sequences)
        columns = len(self.sequences[0])
        total = 0
        for i in range(0, columns):
            observed = ""
            for j in range(0, len(self.sequences)):
                if str(self.sequences[j][i]) not in observed:
                    observed = observed + self.sequences[j][i]
            total += len(observed)
        observations = total / columns
        # calculate adjusted frequencies element-wise:
        # F = (alpha * f + beta * g) / (alpha + beta)
        alpha = observations - 1
        for i in range(0, len(self.get_primary_sequence())):
            for j in range(0, 20):
                pssm[i][j] = (alpha * pssm[i][j] + beta * pssm[i][j]) / (alpha + beta)
        return pssm
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
#bg_matrix only
if bg_matrix is not None and use_sequence_weights is False and redistribute_gaps is False and add_pseudocounts is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
# count observed amino acids
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
if seq[i] != '-':
pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log_real(pssm, bg_matrix)
return np.rint(pssm).astype(np.int64)
#redistribut_gaps and bg_matrix
if redistribute_gaps and bg_matrix is not None and use_sequence_weights is False and add_pseudocounts is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
# count observed amino acids
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
if seq[i] != '-':
pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
# redistribute the gaps according to the background frequency
pssm = self.redistribute(pssm, bg_matrix)
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log_real(pssm, bg_matrix)
return np.rint(pssm).astype(np.int64)
#redistribute_gaps only
if redistribute_gaps and bg_matrix is None and use_sequence_weights is False and add_pseudocounts is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
# count observed amino acids
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
if seq[i] != '-':
pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
# redistribute the gaps according to the background frequency
pssm = self.redistribute_no_bg(pssm)
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log(pssm)
return np.rint(pssm).astype(np.int64)
#weighted only
if use_sequence_weights and bg_matrix is None and redistribute_gaps is False and add_pseudocounts is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
weights = self.get_sequence_weights()
# count observed amino acids with weigths
pssm = self.count_weights(pssm, weights)
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log(pssm)
return np.rint(pssm).astype(np.int64)
#use_sequence_weigts and bg_matrix
if bg_matrix is not None and use_sequence_weights and add_pseudocounts is False and redistribute_gaps is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
weights = self.get_sequence_weights()
# count observed amino acids with weigths
pssm = self.count_weights(pssm, weights)
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log_real(pssm, bg_matrix)
return np.rint(pssm).astype(np.int64)
#pseducounts only
if add_pseudocounts and bg_matrix is None and redistribute_gaps is False and use_sequence_weights is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
# count observed amino acids
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
if seq[i] != '-':
pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
# pssm = self.calculate_pseudocounts(pssm, bg_matrix)
            pssm = self.adjust_frequencies(pssm, beta)
            # normalize, divide by background frequency and transform in negative/positive values
            pssm = self.normalize_divide_log(pssm)
            return np.rint(pssm).astype(np.int64)
#pseudocounts and redistribute gaps and bg matrix
if add_pseudocounts and bg_matrix is not None and redistribute_gaps and use_sequence_weights is False:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
# count observed amino acids
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
if seq[i] != '-':
pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
            pssm = self.calculate_pseudocounts(pssm, bg_matrix)
pssm = self.adjust_frequencies(pssm, beta)
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log_real(pssm, bg_matrix)
return np.rint(pssm).astype(np.int64)
#pseudocounts and sequence_weights and bg matrix
if add_pseudocounts and bg_matrix is not None and redistribute_gaps is False and use_sequence_weights:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
weights = self.get_sequence_weights()
# count observed amino acids with weigths
            pssm = self.count_weights(pssm, weights)
            # normalize, divide by background frequency and transform in negative/positive values
            pssm = self.normalize_divide_log_real(pssm, bg_matrix)
            return np.rint(pssm).astype(np.int64)
#pseudocounts and sequence weights and bg matrix and redistribute gaps (all)
if add_pseudocounts and bg_matrix is not None and redistribute_gaps and use_sequence_weights:
pssm = np.zeros((len(self.get_primary_sequence()), 20))
weights = self.get_sequence_weights()
# count observed amino acids with weigths
pssm = self.count_weights(pssm, weights)
# redistribute the gaps according to the background frequency
            pssm = self.redistribute(pssm, bg_matrix)
            # normalize, divide by background frequency and transform in negative/positive values
            pssm = self.normalize_divide_log_real(pssm, bg_matrix)
            return np.rint(pssm).astype(np.int64)
#basic
pssm = np.zeros((len(self.get_primary_sequence()), 20))
# count observed amino acids
index = 0
for i in range(0, len(self.sequences[0])):
if self.sequences[0][i] == '-':
continue
for seq in self.sequences:
if seq[i] != '-':
pssm[index][AA_TO_INT[seq[i]]] += 1
index += 1
# normalize, divide by background frequency and transform in negative/positive values
pssm = self.normalize_divide_log(pssm)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
ungapped_primary_sequence = self.sequences[0].replace("-","")
return ungapped_primary_sequence
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
partial = np.zeros((self.get_size()[1], self.get_size()[0] + 1), dtype = float)
weights = np.zeros(self.get_size()[0])
# get the matrix for calculating the weights
for i in range(0, len(self.sequences[0])):
temp = []
for seq in self.sequences:
temp.append(seq[i])
partial[i][-1] = len(set(temp))
for j in range(0, self.get_size()[0]):
num = 0
for seq2 in self.sequences:
if seq2[i] == self.sequences[j][i]:
num +=1
partial[i][j] = num
# apply the formula
for row in range(0, self.get_size()[1]):
for col in range(0, self.get_size()[0]):
partial[row][col] = 1 / (partial[row][col] * partial[row][-1])
# calculate the final weights
for col in range(0,self.get_size()[0]):
res = 0.0
for row in range(0, self.get_size()[1]):
if partial[row][-1] != 1:
res += partial[row][col]
weights[col] = res
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
L = self.get_size()[1]
r = np.zeros(L)
for i in range(0, L):
temp = []
for seq in self.sequences:
temp.append(seq[i])
r[i] = len(set(temp))
num_obs = (1 / L) * np.sum(r)
return np.float64(num_obs)
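# A minimal usage sketch with a toy MSA (an illustrative assumption, not
# exercise data):
if __name__ == '__main__':
    _msa = MSA(['SE-AN', 'SE-VN', 'SEQAN'])
    print(_msa.get_size())                    # (3, 5)
    print(_msa.get_primary_sequence())        # 'SEAN' (gaps removed)
    print(_msa.get_sequence_weights())
    print(_msa.get_number_of_observations())  # mean distinct characters per column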
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in "RKH"
def isNegativelyCharged(aa):
return aa in "DE"
def isHydrophobic(aa):
return aa in "VILFWYMA"
def isAromatic(aa):
return aa in "FWYH"
def isPolar(aa):
return aa not in "AGILVFWPCM"
def isProline(aa):
return aa in "P"
def containsSulfur(aa):
return aa in "CM"
def isAcid(aa):
return aa in "DE"
def isBasic(aa):
return aa in "RKH"
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome):
for alphabet in genome:
if alphabet not in ("A", "C", "T", "G"):
raise TypeError()
<file_sep>def isCharged(aa: str):
    return aa in "RHKDE"
def isPositivelyCharged(aa: str):
    return aa in "RHK"
def isNegativelyCharged(aa: str):
    return aa in "DE"
def isHydrophobic(aa: str):
    return aa in "AVILMFYW"
def isAromatic(aa: str):
    return aa in "HFWY"
def isPolar(aa: str):
    return aa in "RNDQEHKSTY"
def isProline(aa: str):
    return aa == "P"
def containsSulfur(aa: str):
    return aa in "CM"
def isAcid(aa: str):
    return aa in "DE"
def isBasic(aa: str):
    return aa in "RHK"
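# A small illustrative driver (not part of the exercise template):
if __name__ == '__main__':
    for _aa in 'DRCP':
        print(_aa, isCharged(_aa), isAromatic(_aa), containsSulfur(_aa), isProline(_aa))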
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequence_list = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequence_list.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
s_list = []
for sequence in self.sequence_list:
if word in sequence:
s_list.append(sequence)
return s_list
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        word_set = set()
        total_unique_words = 0
        for sequence in self.sequence_list:
            current_word_set = set()
            for idx in range(len(sequence) - 2):
                current_word_set.add(sequence[idx:idx+3])
            word_set.update(current_word_set)
            total_unique_words += len(current_word_set)
        return (len(self.sequence_list),
                len(word_set),
                round(total_unique_words / len(self.sequence_list)),
                round(total_unique_words / len(word_set)))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
if pssm is None:
seeds = set([])
for k in range(len(sequence)-2):
for ind1 in range(20):
for ind2 in range(20):
for ind3 in range(20):
seed = INT_TO_AA[ind1] + \
INT_TO_AA[ind2] + INT_TO_AA[ind3]
first = AA_TO_INT[sequence[k]]
second = AA_TO_INT[sequence[k+1]]
third = AA_TO_INT[sequence[k+2]]
score = (
self.substitution_matrix[first][ind1] + self.substitution_matrix[second][ind2] + self.substitution_matrix[third][ind3])
if score >= T:
seeds.add(seed)
            return list(seeds)
else:
seeds = set([])
for i in range(len(pssm) - 2):
for ind1 in range(len(pssm[i])):
for ind2 in range(len(pssm[i])):
for ind3 in range(len(pssm[i])):
seed = INT_TO_AA[ind1] + \
INT_TO_AA[ind2] + INT_TO_AA[ind3]
score = (pssm[i][ind1] + pssm[i + 1]
[ind2] + pssm[i + 2][ind3])
if score >= T:
seeds.add(seed)
            return list(seeds)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query is not None:
word_list = self.get_words(sequence=query, T=T)
for word in word_list:
for index in range(0,len(query)-2):
if pssm is None:
first = AA_TO_INT[query[index]]
second = AA_TO_INT[query[index+1]]
third = AA_TO_INT[query[index+2]]
score = (
self.substitution_matrix[first][AA_TO_INT[word[0]]] + self.substitution_matrix[second][AA_TO_INT[word[1]]] + self.substitution_matrix[third][AA_TO_INT[word[2]]])
else:
score = (pssm[index][AA_TO_INT[word[0]]] + pssm[index + 1]
[AA_TO_INT[word[1]]] + pssm[index + 2][AA_TO_INT[word[2]]])
if score >= T:
q_index = index
score_max = score
hsp_max = word
target_list = blast_db.get_sequences(word)
for target in target_list:
t_index = target.find(word)
#forward
q_index_f = q_index + 3
t_index_f = t_index + 3
hsp = word
while q_index_f < len(query) and t_index_f < len(target):
aa = target[t_index_f]
if pssm is not None:
score += pssm[q_index_f][AA_TO_INT[aa]]
else:
score += self.substitution_matrix[AA_TO_INT[query[q_index_f]]][AA_TO_INT[aa]]
if score_max - score >= X:
break
hsp = hsp + aa
if score > score_max:
score_max = score
hsp_max = hsp
q_index_f += 1
t_index_f += 1
#backward
                            hsp = hsp_max
                            score = score_max  # restart the backward extension from the best forward score
q_index_b = q_index - 1
t_index_b = t_index - 1
while t_index_b > -1 and q_index_b > -1:
aa = target[t_index_b]
if pssm is not None:
score += pssm[q_index_b][AA_TO_INT[aa]]
else:
score += self.substitution_matrix[AA_TO_INT[query[q_index_b]]][AA_TO_INT[aa]]
if score_max - score >= X:
break
hsp = aa + hsp
if score > score_max:
score_max = score
hsp_max = hsp
q_index = q_index_b
t_index = t_index_b
t_index_b -= 1
q_index_b -= 1
                            if score_max >= S:
                                hsp_tuple = (q_index, t_index, len(hsp_max), score_max)
                                # collect all HSPs per target without duplicates
                                if target in d:
                                    if hsp_tuple not in d[target]:
                                        d[target].append(hsp_tuple)
                                else:
                                    d[target] = [hsp_tuple]
else:
pass
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        # the two-hit strategy is not implemented; return the placeholder result
        return d
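# Sketch of the two-hit criterion that search_two_hit() is meant to apply
# (an illustrative helper, not part of the template): two word hits can seed
# one extension only if they lie on the same diagonal (q - t equal) and are
# at most A positions apart without overlapping.
def _hits_are_paired(q1, t1, q2, t2, A=40, word_len=3):
    if q1 - t1 != q2 - t2:  # different diagonals never pair
        return False
    distance = q2 - q1
    return word_len <= distance <= A
if __name__ == '__main__':
    print(_hits_are_paired(5, 12, 20, 27))  # same diagonal, 15 apart -> True
    print(_hits_are_paired(5, 12, 20, 28))  # different diagonals -> False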
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed from NumPy
self.previous_node_matrix = {}
self.alignment_paths = ()
self.path = []
self.m = self.score_matrix.shape[0]
self.n = self.score_matrix.shape[1]
self.max_index = ()
self.max = 0
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(1,self.m,1):
for j in range(1,self.n,1):
self.score_matrix[i,j] = max(0,self.score_matrix[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]],
self.score_matrix[i][j-1]+self.gap_penalty,self.score_matrix[i-1][j]+self.gap_penalty)
if self.score_matrix[i,j] > self.max:
self.max_index = (i,j)
self.max = self.score_matrix[i,j]
prev_path_nodes = None
if self.score_matrix[i][j] > 0:
if i>=2 and j>=2 and self.score_matrix[i,j] == self.score_matrix[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
prev_path_nodes = (i-1,j-1)
elif j >=2 and self.score_matrix[i,j] == self.score_matrix[i][j-1]+self.gap_penalty:
prev_path_nodes = (i,j-1)
elif i >= 2 and self.score_matrix[i,j] == self.score_matrix[i-1][j]+self.gap_penalty:
prev_path_nodes = (i-1,j)
self.previous_node_matrix[(i,j)] = prev_path_nodes
currPath = []
currPath.append((self.m-1,self.n-1))
self.get_alignment()
#self.get_alignments_recursion(currPath)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.alignment_paths[0] != ""
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if self.max == 0:
self.alignment_paths = ("","")
return self.alignment_paths
curr_index = self.max_index
path = []
while curr_index != None and self.score_matrix[curr_index[0],curr_index[1]] > 0:
path.append(curr_index)
curr_index = self.previous_node_matrix[curr_index]
self.path = path
str1 = ""
str2 = ""
str1_idx = path[-1][1]
str2_idx = path[-1][0]
        for node in reversed(path):
            if node[0] == str2_idx:  # no gap in string2
                str2 += str(self.string2[str2_idx-1])
                str2_idx += 1
            else:  # gap (the index check that followed was dead code)
                str2 += "-"
            if node[1] == str1_idx:  # no gap in string1
                str1 += str(self.string1[str1_idx-1])
                str1_idx += 1
            else:  # gap
                str1 += "-"
self.alignment_paths = (str1,str2)
return self.alignment_paths
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
for node in self.path:
if node[string_number%2] == residue_index+1:
return True
        return False
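# A minimal usage sketch (identity scores over a DNA alphabet are an
# illustrative assumption, not a real substitution matrix):
if __name__ == '__main__':
    _alphabet = 'ACGT'
    _identity = {a: {b: (3 if a == b else -3) for b in _alphabet} for a in _alphabet}
    la = LocalAlignment('GACGT', 'TACGA', -2, _identity)
    if la.has_alignment():
        print(la.get_alignment())  # the locally aligned substrings, e.g. ('ACG', 'ACG')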
<file_sep>##############
# Exercise 2.5
##############
from collections import Counter
# You can use the supplied test cases for your own testing. Good luck!
def complementary(item):
COMP = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
comp=[]
for base in item:
comp.append(COMP[base])
return ''.join(comp)
# Exercise 2.1
amino_acides = {
'TTT': 'F',
'TTC': 'F',
'TTA': 'L',
'TTG': 'L' ,
'CTT': 'L',
'CTC': 'L' ,
'CTA': 'L',
'CTG': 'L',
'ATT': 'I',
'ATC': 'I' ,
'ATA': 'I',
'ATG': 'M' ,
'GTT': 'V',
'GTC': 'V' ,
'GTA': 'V',
'GTG': 'V' ,
'TCT': 'S',
'TCC': 'S' ,
'TCA': 'S',
'TCG': 'S' ,
'CCT': 'P',
'CCC': 'P' ,
'CCA': 'P',
'CCG': 'P' ,
'ACT': 'T',
'ACC': 'T' ,
'ACA': 'T',
'ACG': 'T' ,
'GCT': 'A',
'GCC': 'A' ,
'GCA': 'A',
'GCG': 'A' ,
'TAT': 'Y',
'TAC': 'Y' ,
'TAA': 'STOP',
'TAG': 'STOP' ,
'CAT': 'H',
'CAC': 'H' ,
'CAA': 'Q',
'CAG': 'Q' ,
'AAT': 'N',
'AAC': 'N' ,
'AAA': 'K',
'AAG': 'K' ,
'GAT': 'D',
'GAC': 'D' ,
'GAA': 'E',
'GAG': 'E' ,
'TGT': 'C',
'TGC': 'C' ,
'TGA': 'STOP',
'TGG': 'W' ,
'CGT': 'R',
'CGC': 'R' ,
'CGA': 'R',
'CGG': 'R' ,
'AGT': 'S',
'AGC': 'S' ,
'AGA': 'R',
'AGG': 'R' ,
'GGT': 'G',
'GGC': 'G' ,
'GGA': 'G',
'GGG': 'G'
}
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
        return None
    aa_seq = []
    # walk the sequence codon by codon and stop at the first stop codon
    for index in range(0, len(orf), 3):
        current_codon = orf[index:index+3]
        current_aa = amino_acides[current_codon]
        if current_aa == 'STOP':
            break
        aa_seq.append(current_aa)
    return aa_seq
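# A small illustrative check (assumes the corrected codons_to_aa above, which
# returns a list of one-letter codes and stops at the first stop codon):
if __name__ == '__main__':
    print(codons_to_aa('ATGGCCTAA'))  # ['M', 'A'] - translation stops at TAA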
def get_orfs(genome):
allowed_chars = ['a','g','c','t','A','G','C','T']
threshold=33
flag=True
for letter in genome:
if letter not in allowed_chars:
flag=False
break
if flag is False:
raise TypeError('Invalid DNA sequence')
else:
        # the ORF length must be a multiple of 3
        # find the index of ATG and of the stop codon, then take the substring in between
        # the reverse complement is handled below using complementary()
start_indexes=[]
stop_indexes=[]
stops=["TAA","TAG","TGA"]
start=0
counter=0
for i in range (3):
start_indexes.append([])
stop_indexes.append([])
#Add to the lists the positions of the start and stop codons.
while (genome and counter < 3):
for i in range(start,len(genome),3):
codon = genome[i:i+3] #The codon is 3 nucleotides.
#print codon+ "\t"
if(codon == "ATG"): #If the codon is a start codon.
start_indexes[start].append(i+1) #The position of the start codon.
if(codon in stops): #if the codon is a stop codon.
stop_indexes[start].append(i+1) #The position of the stop codon.
start += 1 #promotes the starting position.
counter += 1 #promotes the counter
orfs2=[]
for i in range(3):
if (len(stop_indexes[i] ) !=0 and len(start_indexes) !=0) :
for a in range(len(start_indexes[i])):
for b in range(len(stop_indexes[i])):
if ((start_indexes[i][a] < stop_indexes[i][b] ) and (stop_indexes[i][b] - start_indexes[i][a] > threshold )) :
orf=codons_to_aa(genome[start_indexes[i][a]:stop_indexes[i][b]+1])
orfs2.append([start_indexes[i][a],stop_indexes[i][b],orf,False])
#-------------------complementary part ------------------------
genome_complementary=complementary(genome)[::-1]
start_indexes_comp=[]
stop_indexes_comp=[]
start=0
counter=0
for i in range (3):
start_indexes_comp.append([])
stop_indexes_comp.append([])
#Add to the lists the positions of the start and stop codons.
while (genome_complementary and counter < 3):
for i in range(start,len(genome_complementary),3):
codon = genome_complementary[i:i+3] #The codon is 3 nucleotides.
#print codon+ "\t"
if(codon == "ATG"): #If the codon is a start codon.
start_indexes_comp[start].append(i+1) #The position of the start codon.
if(codon in stops): #if the codon is a stop codon.
stop_indexes_comp[start].append(i+1) #The position of the stop codon.
start += 1 #promotes the starting position.
counter += 1 #promotes the counter
orfs=[]
for i in range(3):
if (len(stop_indexes_comp[i] ) !=0 and len(start_indexes_comp) !=0) :
for a in range(len(start_indexes_comp[i])):
for b in range(len(stop_indexes_comp[i])):
if ((start_indexes_comp[i][a] > stop_indexes_comp[i][b] ) and (start_indexes_comp[i][a] - stop_indexes_comp[i][b] > threshold )) :
orf=codons_to_aa(genome_complementary[start_indexes_comp[i][a]:stop_indexes_comp[i][b]+1])
orfs.append([start_indexes_comp[i][a],stop_indexes_comp[i][b],orf,True])
        orfs_total = orfs2 + orfs
        return orfs_total
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from genetic_code import codons_to_aa
def main():
print(get_orfs(read_genome("C:/Users/pkh/PycharmProjects/pp1ss19exercise1-exercise-ga65coy/tests/genome.txt")))
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
# Returns reverse complement of a sequence
def reverse_complement(seq):
dict = {"A": "T", "T": "A", "G": "C", "C": "G"}
t = seq.maketrans(dict)
return seq[::-1].translate(t)
def generate_seq(input):
return [(input, False, 0), (input[1:]+input[0], False,1), (input[2:]+input[0:2], False,2), (reverse_complement(input), True,0),(reverse_complement(input[1:]+input[0]), True,1), (reverse_complement(input[2:]+input[0:2]), True,2)]
def validation(input):
valid = 'ATGC'
return all(i in valid for i in input)
def get_start_end(seq):
start_codon = ['ATG']
end_codon = ['TAA', 'TAG', 'TGA']
started = False
    ended = False
    start_i = 0  # fallback in case no start codon is found
    seq = seq + seq[0:2]
for i in range(0,len(seq),3):
first_codon = seq[i:i + 3]
if first_codon in start_codon and not started and ended:
seq = seq[i:]+seq[2:]
start_i = i
started = True
if not started and not ended and first_codon in end_codon:
ended = True
return seq, start_i
def get_orfs(input):
if not validation(input):
raise TypeError()
start_codon = ['ATG']
end_codon = ['TAA', 'TAG', 'TGA']
orfs = []
gen_seq = generate_seq(input)
for seq,flag,inc in gen_seq:
started = False
newseq, i = get_start_end(seq)
for j in range(0, len(newseq), 3):
codon = newseq[j:j + 3]
if codon in start_codon and started is False:
started = True
start_index = (i+j) % len(seq)
start_index_no_mod = j
if codon in end_codon:
if started and j-start_index_no_mod > 99:
modified_i = (i + j) % len(seq)
if flag:
orfs.append((len(seq)-1-(start_index - inc), len(seq)-1-(modified_i + 2 - inc), codons_to_aa(newseq[start_index_no_mod:j]), flag))
else:
orfs.append((start_index+inc, modified_i+2+inc, codons_to_aa(newseq[start_index_no_mod:j]), flag))
started = False
return orfs
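# A small illustrative check of the helper above (not part of the template):
if __name__ == '__main__':
    print(reverse_complement('ATGC'))  # 'GCAT'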
<file_sep>import re
from itertools import takewhile, cycle
from itertools import chain, repeat
dna_codon_dict = {
'TTT':'F','TTC':'F','TTA':'L','TTG':'L','CTT':'L','CTC':'L','CTA':'L','CTG':'L',
'ATT':'I','ATC':'I','ATA':'I','ATG':'M','GTT':'V','GTC':'V','GTA':'V','GTG':'V',
'TCT':'S','TCC':'S','TCA':'S','TCG':'S','CCT':'P','CCC':'P','CCA':'P','CCG':'P',
'ACT':'T','ACC':'T','ACA':'T','ACG':'T','GCT':'A','GCC':'A','GCA':'A','GCG':'A',
'TAT':'Y','TAC':'Y','TAA':'','TAG':'','CAT':'H','CAC':'H','CAA':'Q','CAG':'Q',
'AAT':'N','AAC':'N','AAA':'K','AAG':'K','GAT':'D','GAC':'D','GAA':'E','GAG':'E',
'TGT':'C','TGC':'C','TGA':'','TGG':'W','CGT':'R','CGC':'R','CGA':'R','CGG':'R',
'AGT':'S','AGC':'S','AGA':'R','AGG':'R','GGT':'G','GGC':'G','GGA':'G','GGG':'G'
}
comp_tab = {
'A' : 'T',
'T' : 'A',
'C' : 'G',
'G' : 'C'
}
# Return complementary strand
def complementary(strand):
return ''.join(comp_tab[char] for char in strand.upper())
# Return reverse complementary strand
def reverse_complementary(strand):
comp_strand = complementary(strand)
reverse_comp_strand = comp_strand[::-1]
return reverse_comp_strand
# Return the given ORF encoded as an amino acid sequence
def codons_to_aa(orf):
if len(orf) % 3 != 0 or len(orf) == 0:
return None
orf = orf.upper()
aa_seq = []
for i in range(0, len(orf), 3):
codon = orf[i:i+3]
aa_seq.append(dna_codon_dict.get(codon))
return ''.join(aa_seq)
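# A small illustrative check (stop codons map to the empty string in
# dna_codon_dict, so they simply vanish from the translation):
if __name__ == '__main__':
    print(reverse_complementary('ATGAAA'))  # 'TTTCAT'
    print(codons_to_aa('ATGAAATAG'))        # 'MK'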
#Read file with genome data
def get_genome(filename):
f = open(filename, "r")
data = f.read()
data = data.splitlines()
genome = "".join(data)
return genome
# "Returns the sequence elements n times"
def ncycles(iterable, n):
return chain.from_iterable(repeat(tuple(iterable), n))
def get_relative_index(genome, codon, isStart, isReverseComp, isCircular):
idx = 0
if (isCircular):
        original_genome_length = len(genome) // 2  # use integer division for indexing
else:
original_genome_length = len(genome)
orf_idx = genome.index(codon)
if (orf_idx > original_genome_length): #if (isCircular):
orf_idx = original_genome_length - (orf_idx % original_genome_length)
if (not isReverseComp):
if (isStart):
idx = orf_idx
else:
idx = orf_idx + 2
else:
if (isStart):
idx = (original_genome_length - 1) - orf_idx
else:
idx = (original_genome_length - 1) - (orf_idx + 2)
return idx
def find_orfs(genome, isRevComp, isCircular):
orfs_list = []
aa_seq_added = []
for frame in range(0, 3, 1):
for i in range(frame, len(genome), 3):
start_codon = genome[i:]
if (start_codon.startswith('ATG')):
for j in range(i, len(genome), 3):
stop_codon = genome[j:]
if (stop_codon.startswith(('TAA', 'TGA', 'TAG'))):
orf = genome[genome.index(start_codon) : genome.index(stop_codon)]
if (len(orf)%3==0):
aa_seq = codons_to_aa(orf)
# Check if the amino acid sequence has already added to the ist
repeated = False
for seq in aa_seq_added:
if (seq == aa_seq):
repeated = True
break;
if (not repeated):
aa_seq_added.append(aa_seq)
start_idx = get_relative_index(genome, start_codon, True, isRevComp, isCircular)
stop_idx = get_relative_index(genome, stop_codon, False, isRevComp, isCircular)
entry = (start_idx, stop_idx, aa_seq, isRevComp)
                                # Check if the amino acid sequence is longer than 33
if (aa_seq != None and (len(aa_seq) >= 34)):
#Check if is an overlapping sequence
isSubORF = False
for element in orfs_list:
if ((stop_idx in element) and len(aa_seq) < len(element[2])):
isSubORF = True
if (not isSubORF):
orfs_list.append(entry)
i = genome.index(stop_codon)+2
break;
return orfs_list
def get_orfs(genome):
orfs = []
orfs_prim_strand = []
orfs_rev_comp = []
orfs_prim_strand_circular = []
orfs_rev_comp_curcular = []
dna_nt = "ACGT"
genome = genome.upper()
# Check if genome data is valid
if(all(nt in dna_nt for nt in genome)):
orfs_prim_strand = find_orfs(genome, False, False)
rev_comp = reverse_complementary(genome)
orfs_rev_comp = find_orfs(rev_comp, True, False)
circular_dna = ''.join(c for c in ncycles(genome, 2))
orfs_prim_strand_circular = find_orfs(circular_dna, False, True)
rev_comp = reverse_complementary(circular_dna)
orfs_rev_comp_curcular = find_orfs(rev_comp, True, True)
else:
raise TypeError("Invalid DNA sequence! Check for invalid Nucleotides in the provided genome")
orfs = list(set(orfs_prim_strand + orfs_rev_comp + orfs_prim_strand_circular + orfs_rev_comp_curcular))
return orfs
# def main():
# genome = get_genome("./tests/genome.txt")
# print(get_orfs(genome))
# main()<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1 #column -> [][x]
self.string2 = string2 #row -> [x][]
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int)
self.alignments = []
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
score = 0
# fill row (string2)
self.fill_initial_score_matrix(True, 0, score)
# fill column (string1)
self.fill_initial_score_matrix(False, 0,score)
self.fill_score_matrix()
#get alignments from matrix
self.make_alignments(self.score_matrix.shape[0]-1, self.score_matrix.shape[1]-1, "")
def make_alignments(self, m, n, alignment):
if m == 0 and n == 0:
self.alignments.append((self.string1,alignment[::-1]))
return alignment[::-1]
else:
pos1 = self.score_matrix[m - 1][n - 1] + self.substitution_matrix[self.string2[m - 1]][self.string1[n - 1]]
pos2 = self.score_matrix[m - 1][n] + self.gap_penalty
pos3 = self.score_matrix[m][n - 1] + self.gap_penalty
if self.score_matrix[m][n] == pos1:
#substitution
alignment2 = alignment + self.string2[m - 1]
self.make_alignments(m-1, n-1, alignment2)
if self.score_matrix[m][n] == pos2:
#moved right
alignment2 = alignment + self.string2[m - 1]
self.make_alignments(m - 1, n, alignment2)
if self.score_matrix[m][n] == pos3:
#moved down
alignment2 = alignment + "-"
self.make_alignments(m, n - 1, alignment2)
def fill_score_matrix(self):
for m in range(1, len(self.string2)+1):
for n in range(1, len(self.string1)+1):
pos1 = self.score_matrix[m-1][n-1] + self.substitution_matrix[self.string2[m-1]][self.string1[n-1]]
pos2 = self.score_matrix[m-1][n] + self.gap_penalty
pos3 = self.score_matrix[m][n-1] + self.gap_penalty
self.score_matrix[m][n] = max(pos1, pos2, pos3)
def fill_initial_score_matrix(self, is_row, step_nr, score):
curr_gap_penalty = score
if is_row:
for foo in range(len(self.string2) + 1):
self.score_matrix[foo][step_nr] = curr_gap_penalty
curr_gap_penalty += self.gap_penalty
else:
for foo in range(len(self.string1) + 1):
self.score_matrix[step_nr][foo] = curr_gap_penalty
curr_gap_penalty += self.gap_penalty
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
res = self.score_matrix[self.score_matrix.shape[0]-1][self.score_matrix.shape[1]-1]
return res
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
<file_sep>##############
# Exercise 2.6
##############
from collections import defaultdict
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
        total = 0
        for seq in self.__sequences:
            total += len(seq)
        return total / self.get_counts()
def add_sequence(self, seq):
self.__sequences.append(seq)
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.add_sequence(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip().replace("*","")
self.add_sequence(seq)
    def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
        counts = defaultdict(int)
        for seq in self.__sequences:
            for aa in seq:
                counts[aa] += 1
        return counts
    def get_av_frequencies(self):
        # return number of occurrences normalized by total sequence length
        total = 0
        for seq in self.__sequences:
            total += len(seq)
        freqs = self.get_abs_frequencies()
        for key in freqs:
            freqs[key] /= total
        return freqs<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import re
genlib = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',
}
def codons_to_aa(orf_list):
return ''.join([genlib[codon] for codon in orf_list])
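# Editor's worked example (illustration only):
#   codons_to_aa(['ATG', 'AAA', 'TAA']) == 'MK_'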
def complementary(seq):
r_seq = ""
for s in seq:
if s is "T":
r_seq += "A"
if s is "A":
r_seq += "T"
if s is "C":
r_seq += "G"
if s is "G":
r_seq += "C"
return r_seq
start_codon = 'ATG'
end_codons = ['TAA', 'TAG', 'TGA']
def calculate_end_position_ring(af_end_first, af_start_pos, length, rev):
start = (af_start_pos + 3 * af_end_first + 2) % length
if rev:
start = length - 1 - start
return start
def calculate_start_position(ind_s, seq_start, length, rev):
start = seq_start + 3 * ind_s
if rev:
start = length - 1 - start
return start
def calculate_positions(ind_s, ind_e, seq_start, length, rev):
start = calculate_start_position(ind_s, seq_start, length, rev)
end = seq_start + 3 * ind_e + 2
if rev:
end = length - 1 - end
return start, end
def put_if_exists(l, subl):
if subl is not None and len(subl) > 0:
l.extend(subl)
def generate_ends_lib(seq_arr):
endslib = {}
for codon in end_codons:
endslib[codon] = [index for index, value in enumerate(seq_arr) if value == codon]
return endslib
def find_stop_minimum_for_start(start_index, endslib):
mins = []
for codon in end_codons:
tmp = [i for i in endslib[codon] if i > start_index]
if len(tmp) > 0:
mins.append(tmp[0])
if len(mins) > 0:
return min(mins)
else:
return None
def try_to_get_orf(seq_arr, length, start_position, rev, after_data, end_codon_sorted):
if not (start_codon in seq_arr):
return None
if not any([e in seq_arr for e in end_codons]):
return None
starts_arr = [index for index, value in enumerate(seq_arr) if value == start_codon]
endslib = generate_ends_lib(seq_arr)
for ind_start in starts_arr:
end_first = find_stop_minimum_for_start(ind_start, endslib)
if end_first is not None:
if end_first - ind_start > 33:
s_pos, e_pos = calculate_positions(ind_start, end_first, start_position, length, rev)
seq_to_translate = seq_arr[ind_start:end_first]
to_add = (s_pos, e_pos, codons_to_aa(seq_to_translate), rev)
if not (e_pos in end_codon_sorted):
end_codon_sorted[e_pos] = []
end_codon_sorted[e_pos].append(to_add)
else:
af_endslib = generate_ends_lib(after_data['seq'])
af_end_first = find_stop_minimum_for_start(-1, af_endslib)
if af_end_first is not None:
af_s_pos = calculate_start_position(ind_start, start_position, length, rev)
af_e_pos = calculate_end_position_ring(af_end_first, after_data['start'], length, rev)
af_seq_to_translate = seq_arr[ind_start:]
af_seq_to_translate.extend(after_data['seq'][:af_end_first])
if len(af_seq_to_translate) > 33:
af_to_add = (af_s_pos, af_e_pos, codons_to_aa(af_seq_to_translate), rev)
if not (af_e_pos in end_codon_sorted):
end_codon_sorted[af_e_pos] = []
end_codon_sorted[af_e_pos].append(af_to_add)
def split_frame_analyse(in_genome, reverse, found_orfs):
orfs_list = []
len_gen = len(in_genome)
for i in range(0, 3, 1):
seq_base = [in_genome[j: j+3] for j in range(i, len_gen - i - (len_gen - i) % 3, 3)]
leftover_ind = (len_gen - (len_gen - i) % 3) % len_gen
leftover_genome_repeat = ""
if leftover_ind != 0:
leftover_genome_repeat = in_genome[leftover_ind:]
leftover_genome_repeat += in_genome + in_genome + in_genome[:i]
len_lgr = len(leftover_genome_repeat)
after_data = {
'start': leftover_ind,
'seq': [leftover_genome_repeat[j: j+3] for j in range(0, len_lgr - len_lgr % 3, 3)]
}
try_to_get_orf(seq_base, len(in_genome), i, reverse, after_data, found_orfs)
return found_orfs
def overlaps_clean(orfs):
    f_res = []
    for end_key in orfs:
        longest = orfs[end_key][0]
        for el in orfs[end_key]:
            if len(el[2]) > len(longest[2]):
                longest = el
        f_res.append(longest)
    if len(f_res) == 0:
        raise TypeError
    return f_res
def get_orfs(genome):
not_dna = re.compile("[^ATCG]")
    if not_dna.search(genome) is not None:
raise TypeError
got_orfs = {}
split_frame_analyse(genome, False, got_orfs)
rc_genome = ''.join(reversed(complementary(genome)))
split_frame_analyse(rc_genome, True, got_orfs)
    return overlaps_clean(got_orfs)
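# Editor's note: hedged usage sketch. get_orfs expects a plain ATCG string and
# returns (start, stop, translated_protein, is_reverse) tuples; the toy genome
# below is an illustrative construction, not part of the exercise data.
if __name__ == '__main__':
    toy_genome = 'ATG' + 'GCT' * 34 + 'TAA'   # start codon, 34 alanines, stop
    for orf in get_orfs(toy_genome):
        print(orf)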
<file_sep>*AUTHOR: <NAME>, 2019*
This is a plagiarism check for the code homework handed in by the students
in the ProteinPrediction exercise SS 2019.
# Automatically download git repos (test_git folder)
Before starting, the following needs to be done for each exercise:
- In additional_info/student_lists, save the names of the students which participated in a .csv file as 'students_<#ex>.csv' (export names in Artemis)
- In additional_info/templates/, save the template files as .py files
- Adjust the <files_x> var in codechecker/downloadCode.sh (list the files in the exercise)
1. Adjust the <dirs> var in codechecker/checkCode.sh (list the exercises you want to check)
2. Run checkCode.sh in the codechecker directory
All results can be found in the results/ex_<i>/ folder
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio import SeqIO # Tip: This module might be useful for parsing...
import xml.etree.ElementTree as ET
import os
############ Exercise 3: SwissProt ##########
class SwissProt_Parser:
PARSER = SeqIO
def __init__( self, path, frmt='uniprot-xml' ):
'''
Initialize every SwissProt_Parser with a path to a XML-formatted UniProt file.
An example file is included in the repository (P09616.xml).
Tip: Store the parsed XML entry in an object variable instead of parsing it
again & again ...
'''
""" elif el.tag == '''{http://uniprot.org/uniprot}name''' and el.get('type') == 'scientific':
self.org = el.text """
tree = ET.parse(path)
root = tree.getroot()
self.pdb_supp = []
found_loc = False
for el in root.iter():
if el.tag == '''{http://uniprot.org/uniprot}sequence''':
self.seq_length = el.get('length')
elif el.tag == '''{http://uniprot.org/uniprot}accession''':
self.id = el.text
elif el.tag == '''{http://uniprot.org/uniprot}location''' and not found_loc:
self.loc = el.text.split()
found_loc = True
elif el.tag == '''{http://uniprot.org/uniprot}dbReference''' and el.get('type') == 'PDBsum':
self.pdb_supp.append(el.get('id'))
""" elif el.tag == '''{http://uniprot.org/uniprot}organism''':
name = el.find('''{http://uniprot.org/uniprot}name''')
if name.text:
self.org = name.text """
""" elif el.tag == '''{http://uniprot.org/uniprot}name''' and el.get('type') == 'scientific':
self.org = el.text """
self.sp_anno = SeqIO.parse(path, frmt) # Parse the XML file once and re-use it in the functions below
for record in self.sp_anno:
self.org = record.annotations['organism']
# 3.2 SwissProt Identifiers
def get_sp_identifier( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Unique SwissProt identifier for the given xml file
'''
return self.id
# 3.3 SwissProt Sequence length
def get_sp_sequence_length( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return sequence length of the UniProt entry as an integer.
'''
return int(self.seq_length)
# 3.4 Organism
def get_organism( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
            Return the name of the organism as stated in the corresponding field
of the XML data. Return value has to be a string.
'''
return self.org
# return self.sp_anno.annotations['organism']
# 3.5 Localizations
def get_localization( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return the name of the subcellular localization as stated in the
corresponding field.
Return value has to be a list of strings.
'''
return self.loc
# 3.6 Cross-references to PDB
def get_pdb_support( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Returns a list of all PDB IDs which support the annotation of the
given SwissProt XML file. Return the PDB IDs as list.
'''
return self.pdb_supp
def main():
print('SwissProt XML Parser class')
spp = SwissProt_Parser(os.path.join(os.path.dirname(__file__), "tests", "P09616.xml"))
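    # Editor's illustration (hedged): typical follow-up queries; the exact
    # values depend on the bundled P09616.xml file.
    #   spp.get_sp_identifier(); spp.get_sp_sequence_length()
    #   spp.get_organism(); spp.get_localization(); spp.get_pdb_support()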
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
positively_charged = {'H', 'K', 'R'}
if aa in positively_charged:
return True
return False
def isNegativelyCharged(aa):
negatively_charged = {'D', 'E'}
if aa in negatively_charged:
return True
return False
def isHydrophobic(aa):
hydrophobic = {'A', 'F', 'I', 'L', 'M', 'V', 'W', 'Y'}
if aa in hydrophobic:
return True
return False
def isAromatic(aa):
    aromatic = {'F', 'H', 'W', 'Y'}
    if aa in aromatic:
return True
return False
def isPolar(aa):
polar = {'D', 'E', 'H', 'K', 'N', 'Q', 'R', 'S', 'T', 'Y'}
if aa in polar:
return True
return False
def isProline(aa):
proline = {'P'}
if aa in proline:
return True
return False
def containsSulfur(aa):
sulfur_contain = {'C', 'M'}
if aa in sulfur_contain:
return True
return False
def isAcid(aa):
acid = {'D', 'E'}
if aa in acid:
return True
return False
def isBasic(aa):
basic = {'H', 'K', 'R'}
if aa in basic:
return True
return False<file_sep>##############
# Exercise 2.7
##############
positives = {'R', 'K', 'H'}
negatives = {'E', 'D'}
aromatic = {'H', 'F', 'W', 'Y'}
polar = {'D', 'E', 'H', 'K', 'N', 'Q', 'R', 'S', 'T', 'Y'}
acid = {'D', 'E'}
basic = {'R', 'H', 'K'}
# hydrophobic = {'A', 'C', 'F', 'G', 'I', 'L', 'M', 'P', 'V', 'W'}
hydrophobic = {'A', 'V', 'I', 'L', 'M', 'F', 'Y', 'W'}
proline = {'P'}
sulfur = {'C', 'M'}
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in positives
def isNegativelyCharged(aa):
return aa in negatives
def isHydrophobic(aa):
return aa in hydrophobic
def isAromatic(aa):
return aa in aromatic
def isPolar(aa):
return aa in polar
def isProline(aa):
return aa in proline
def containsSulfur(aa):
return aa in sulfur
def isAcid(aa):
return aa in acid
def isBasic(aa):
return aa in basic
<file_sep>##############
# Exercise 2.6
##############
import statistics
from collections import Counter
class AADist:
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return counter
    def get_average_length(self):
        return statistics.mean(length)
def read_fasta(self, path):
global counter
global length
global abs_frequencies
global a_frequencies
counter=0
length=[]
lines=open(path, "r")
fastas=[]
oldFasta=True
currentFasta=''
for line in lines:
if line.startswith(";") or line.startswith(">"):
if oldFasta:
oldFasta=False
if currentFasta !='':
length.append(len(currentFasta.replace('*','')))
fastas.append(currentFasta)
counter+=1
currentFasta=''
continue
oldFasta=True
currentFasta += line.strip()
fastas.append(currentFasta)
        length.append(len(currentFasta.replace('*', '')))
lines.close()
frequent=str.join('',fastas)
frequent=frequent.replace('*','')
a_frequencies=len(frequent)
abs_frequencies=Counter(frequent)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
return abs_frequencies
def get_av_frequencies(self):
        # return number of occurrences normalized by length
for item in abs_frequencies:
abs_frequencies[item] = abs_frequencies[item]/a_frequencies
return abs_frequencies
<file_sep>##############
# Exercise 2.6
##############
amino_acids = ["A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "M",
"L", "K", "F", "P", "S", "T", "W", "Y", "V"]
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
sum = 0.0
for seq in self.__sequences:
sum += len(seq)
return sum / float(self.get_counts())
def clean_seq(self, seq):
for aa in seq:
if aa not in amino_acids:
seq = seq.replace(aa, "")
return seq
def read_fasta(self, path):
with open(path, "r") as fd:
seq = ""
sequence_started = False
for line in fd:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.__sequences.append(self.clean_seq(seq))
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(self.clean_seq(seq))
def get_abs_frequencies(self):
ret = {}
for aa in amino_acids:
ret[aa] = 0
for seq in self.__sequences:
ret[aa] += seq.count(aa)
return ret
def printSeq(self):
return self.__sequences
def get_av_frequencies(self):
sum = 0.0
for seq in self.__sequences:
sum += len(seq)
ret = {}
for key, val in self.get_abs_frequencies().items():
ret[key] = float(val) / sum
return ret
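# Editor's note: hedged usage sketch; 'example.fasta' is a placeholder path,
# not a file shipped with the exercise.
if __name__ == '__main__':
    dist = AADist('example.fasta')
    print(dist.get_counts(), dist.get_average_length())
    print(dist.get_av_frequencies())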
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences=[]
##self.word_count=np.zeros((8000))
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
#self.sequence
def getAll(self):
return self.sequences
def create_np(self):
self.seq=np.asarray(self.sequences)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
#self.seq=np.asarray(self.sequences)
#return self.seq[np.where(word in self.seq)].tolist()
output=[]
for sequence in self.sequences:
if word in sequence:
output.append(sequence)
return output
def get_seq(self):
return self.sequences
def get_index(self,word):
return ALPHABET.index(word[0])*400+ALPHABET.index(word[1])*20+ALPHABET.index(word[2])
def get_word(self,index):
first=index/400
index=index%400
second=index/20
index=index%20
third=index
return ALPHABET[int(first)]+ALPHABET[int(second)]+ALPHABET[int(third)]
def get_num_seq_cont_each_word(self):
total_word_count=np.zeros(8000)
for sequence in self.sequences:
words_in_seq=np.zeros(8000)
for i in range(len(sequence)-2):
word=sequence[i:i+3]
words_in_seq[self.get_index(word)]=1
total_word_count+=words_in_seq
sum_array=np.sum(total_word_count)
total=(total_word_count>0).sum()
return int(sum_array/total+0.5)
def get_num_words_in_sequence(self,sequence):
word_count=np.zeros((8000))
for i in range(len(sequence)-2):
word=sequence[i:i+3]
word_count[self.get_index(word)]+=1
return (word_count>0).sum()
def get_num_words_in_all_sequences(self):
self.word_count=np.zeros((8000))
for sequence in self.sequences:
for i in range(len(sequence)-2):
word=sequence[i:i+3]
#print(word)
self.word_count[self.get_index(word)]+=1
return (self.word_count>0).sum()
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
self.word_count=np.zeros((8000))
total_word_count=np.zeros(8000)
count=0
for sequence in self.sequences:
words_in_seq=np.zeros(8000)
count+=self.get_num_words_in_sequence(sequence)
for i in range(len(sequence)-2):
word=sequence[i:i+3]
self.word_count[self.get_index(word)]+=1
words_in_seq[self.get_index(word)]=1
total_word_count+=words_in_seq
num_of_words_in_database= (self.word_count>0).sum()
count/=len(self.sequences)
count=int(count+0.5)
sum_array=np.sum(total_word_count)
total=(total_word_count>0).sum()
avg4=int(sum_array/total+0.5)
return (len(self.sequences), num_of_words_in_database, count, avg4)
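    # Editor's illustration (hypothetical sequences): after
    #   db = BlastDb(); db.add_sequence('MGPRARPAFL'); db.add_sequence('MGPRWKAL')
    # db.get_db_stats() returns a 4-tuple of ints:
    #   (num_seqs, num_distinct_words, avg_words_per_seq, avg_seqs_per_word)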
class Blast:
def get_index(self,word):
return ALPHABET.index(word[0])*400+ALPHABET.index(word[1])*20+ALPHABET.index(word[2])
def get_word(self,index):
first=index/400
index=index%400
second=index/20
index=index%20
third=index
return ALPHABET[int(first)]+ALPHABET[int(second)]+ALPHABET[int(third)]
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix=substitution_matrix
#self.compute_scores_for_word_pairs()
##use sub matrix tocompute possible word combinations
def compute_scores_for_word_pairs(self):
#pass
self.word_pairs=np.zeros((8000,8000))
        for i in range(8000):
            for j in range(i, 8000):
self.word_pairs[i][j]=self.sub_matrix[AA_TO_INT[self.get_word(i)[0]]][AA_TO_INT[self.get_word(j)[0]]]+self.sub_matrix[AA_TO_INT[self.get_word(i)[1]]][AA_TO_INT[self.get_word(j)[1]]]+self.sub_matrix[AA_TO_INT[self.get_word(i)[2]]][AA_TO_INT[self.get_word(j)[2]]]
def get_words_orig(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
self.word_list=[]
found_words=np.zeros(8000)
        if sequence is not None:
for i in range(len(sequence)-2):
query=sequence[i:i+3]
for j in range(8000):
word=self.get_word(j)
#compute score
score=self.sub_matrix[AA_TO_INT[query[0]]][AA_TO_INT[word[0]]]
score+=self.sub_matrix[AA_TO_INT[query[1]]][AA_TO_INT[word[1]]]
score+=self.sub_matrix[AA_TO_INT[query[2]]][AA_TO_INT[word[2]]]
if score>=T:
found_words[j]=1
self.word_list.append((word,i))
else:
for i in range (pssm.shape[0]-2):
for j in range(8000):
word=self.get_word(j)
score=pssm[i][AA_TO_INT[(word[0])]]
score+=pssm[i+1][AA_TO_INT[(word[1])]]
score+=pssm[i+2][AA_TO_INT[(word[2])]]
if score>=T:
found_words[j]=1
self.word_list.append((word,i))
output=[]
for i in range (8000):
if found_words[i]>0:
output.append(self.get_word(i))
return output
def get_words(self, *, sequence=None, pssm=None, T=11):
self.word_list=[]
words=set()
self.word_score={}
        if sequence is not None:
for i in range(len(sequence)-2):
#query=sequence[i:i+3]
for j in range(8000):
word=self.get_word(j)
#compute score
score=self.sub_matrix[AA_TO_INT[sequence[i]]][AA_TO_INT[word[0]]]
score+=self.sub_matrix[AA_TO_INT[sequence[i+1]]][AA_TO_INT[word[1]]]
score+=self.sub_matrix[AA_TO_INT[sequence[i+2]]][AA_TO_INT[word[2]]]
self.word_score[(sequence[i:i+3],word)]=score
#if sequence[i:i+3] =="MIA":
# print("found")
if score>=T:
self.word_list.append((word,i))
words.add(word)
else:
for i in range (pssm.shape[0]-2):
for j in range(8000):
word=self.get_word(j)
score=pssm[i][AA_TO_INT[(word[0])]]
score+=pssm[i+1][AA_TO_INT[(word[1])]]
score+=pssm[i+2][AA_TO_INT[(word[2])]]
if score>=T:
self.word_list.append((word,i))
words.add(word)
self.words=words
return words
def get_words_wrong(self, *, sequence=None, pssm=None, T=11):
self.word_list=[]
words=set()
        if sequence is not None:
for i in range(len(sequence)-2):
#query=sequence[i:i+3]
for j in range(8000):
word=self.get_word(j)
#compute score
score= self.score_for_two_words[(self.get_index(sequence[i]+sequence[i+1]+sequence[i+2]),j)]
if score>=T:
self.word_list.append((word,i))
words.add(word)
else:
for i in range (pssm.shape[0]-2):
for j in range(8000):
word=self.get_word(j)
score=pssm[i][AA_TO_INT[(word[0])]]
score+=pssm[i+1][AA_TO_INT[(word[1])]]
score+=pssm[i+2][AA_TO_INT[(word[2])]]
if score>=T:
self.word_list.append((word,i))
words.add(word)
return words
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
#precompute
self.preSeq(blast_db)
self.get_words(sequence=query,pssm=pssm,T=T)
dict_getSequences=self.dict_contained
d = dict()
found_tup=0
if True:
for word,start_query in self.word_list:
#print(self.word_list)
for target in dict_getSequences[word]:
start_target=0
                    while True:
start_target=target.find(word,start_target)
if start_target==-1:
break
                        if query is not None:
temp=self.findHSP(word,target,query,start_target,start_query,S,X)
else:
temp=self.findHSP_PSSM(word,target,pssm,start_target,start_query,S,X)
#if target[0:5]=="MNFAE" and False:
# print("MNFAE ==>",temp, "--",word,start_target,start_query)
#min score
if temp[3]>=S:
if target in d:
existing_tuples=d[target]
else:
existing_tuples=[]
if not temp in existing_tuples:
existing_tuples.append(temp)
d[target]=existing_tuples
start_target+=1
return d
def findHSP(self,word,target,query,start_target,start_query,S,X):
#score for first 3 AAs
score=self.sub_matrix[AA_TO_INT[target[start_target]]][AA_TO_INT[query[start_query]]]
score+=self.sub_matrix[AA_TO_INT[target[start_target+1]]][AA_TO_INT[query[start_query+1]]]
score+=self.sub_matrix[AA_TO_INT[target[start_target+2]]][AA_TO_INT[query[start_query+2]]]
#extend right
current_index_query=start_query+3
current_index_target=start_target+3
max_score=score
index_of_max_score_right_query=start_query+2
index_of_max_score_right_target=start_target+2
while True:
#test index right
if ((current_index_target>(len(target)-1)) or (current_index_query > (len(query)-1))):
break
#test max score - X
if (score<=(max_score-X)):
break
#calc score for current index
score+=self.sub_matrix[AA_TO_INT[target[current_index_target]]][AA_TO_INT[query[current_index_query]]]
if score > max_score:
max_score=score
index_of_max_score_right_query=current_index_query
index_of_max_score_right_target=current_index_target
current_index_target+=1
current_index_query+=1
#if target[0:5]=="MNFAE" and False:
# print("Score:",score, "maxScore",max_score,"current_index_target",current_index_target,"current_index_query",current_index_query)
#extend left
current_index_query=start_query-1
current_index_target=start_target-1
score=max_score
index_of_max_score_left_query=start_query
index_of_max_score_left_target=start_target
while True:
#test index left
if ((current_index_target<0) or (current_index_query <0)):
break
#test max score - X
if (score<=(max_score-X)):
break
#calc score for current index
score+=self.sub_matrix[AA_TO_INT[target[current_index_target]]][AA_TO_INT[query[current_index_query]]]
if score > max_score:
max_score=score
index_of_max_score_left_query=current_index_query
index_of_max_score_left_target=current_index_target
current_index_target-=1
current_index_query-=1
return (index_of_max_score_left_query,index_of_max_score_left_target,index_of_max_score_right_target-index_of_max_score_left_target+1,max_score)
def findHSP_PSSM(self,word,target,pssm,start_target,start_query,S,X):
#score for first 3 AAs
score=pssm[start_query][AA_TO_INT[target[start_target]]]
score+=pssm[start_query+1][AA_TO_INT[target[start_target+1]]]
score+=pssm[start_query+2][AA_TO_INT[target[start_target+2]]]
#extend right
current_index_query=start_query+3
current_index_target=start_target+3
max_score=score
index_of_max_score_right_query=start_query+2
index_of_max_score_right_target=start_target+2
while True:
#test index right
if ((current_index_target>(len(target)-1)) or (current_index_query > (pssm.shape[0]-1))):
break
#test max score - X
if (score<=(max_score-X)):
break
#calc score for current index
score+=pssm[current_index_query][AA_TO_INT[target[current_index_target]]]
if score > max_score:
max_score=score
index_of_max_score_right_query=current_index_query
index_of_max_score_right_target=current_index_target
current_index_target+=1
current_index_query+=1
#if target[0:5]=="MNFAE" and False:
# print("Score:",score, "maxScore",max_score,"current_index_target",current_index_target,"current_index_query",current_index_query)
#extend left
current_index_query=start_query-1
current_index_target=start_target-1
score=max_score
index_of_max_score_left_query=start_query
index_of_max_score_left_target=start_target
while True:
#test index left
if ((current_index_target<0) or (current_index_query <0)):
break
#test max score - X
if (score<=(max_score-X)):
break
#calc score for current index
score+=pssm[current_index_query][AA_TO_INT[target[current_index_target]]]
if score > max_score:
max_score=score
index_of_max_score_left_query=current_index_query
index_of_max_score_left_target=current_index_target
current_index_target-=1
current_index_query-=1
return (index_of_max_score_left_query,index_of_max_score_left_target,index_of_max_score_right_target-index_of_max_score_left_target+1,max_score)
def useIndex(self,item):
return item[1]
def getAllOccurances(self,string,substring):
out=set()
start=0
while True:
start=string.find(substring,start)
if start==-1:
break
out.add(start)
start+=1
return out
def precomputePossibleWord2(self,A):
#if start_query2-start_query<=A and start_query2>(start_query+2):
out={}#dict()
for word,start_query in self.word_list:
temp=[]
for word2,start_query2 in self.word_list:
if start_query2-start_query>A:
break
if start_query2>(start_query+2):
temp.append((word2,start_query2))
out[start_query]=temp
return out
def check_contained(self,d2,target,start_query,offset):
if (target,offset) in d2:
if start_query<d2[(target,offset)]:
return False
return True
##do stuff with sequences
def preSeq(self,db_blast):
#dict with [word]=seqs containing word
self.dict_contained={}
self.dict_positions_of_word_in_seq={}
for i in range (8000):
word=self.get_word(i)
seqs=set()
for sequence in db_blast.getAll():
if word in sequence:
seqs.add(sequence)
self.dict_positions_of_word_in_seq[(word,sequence)]=self.getAllOccurances(sequence,word)
self.dict_contained[word]=seqs
def precomputePossibleWord2_for_target(self,A,dict_getSequences,dict_word_combs):
#if start_query2-start_query<=A and start_query2>(start_query+2):
out={}#dict()
for word,start_query in self.word_list:
temp=[]
for word2,start_query2 in dict_word_combs[start_query]:
distance_query2_query1=start_query2-start_query
for target in dict_getSequences[word]:
if target in dict_getSequences[word2]:
for start_target in self.dict_positions_of_word_in_seq[(word,target)]:
end=start_target-start_query+start_target
if target[end:end+3]==word2:
out[word]=end
return out
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
dict_getSequences=self.dict_contained
dict_word_in_seq_positions=self.dict_positions_of_word_in_seq
words=self.get_words(sequence=query,pssm=pssm,T=T)
dict_word_combs=self.precomputePossibleWord2(A)
#dict_word_combs=self.precomputePossibleWord2_for_target(A,dict_getSequences,dict_word_combs)
#return None
d = {}#dict()
d2 = {}#dict()
for word,start_query in self.word_list:
#print(self.word_list)
for target in dict_getSequences[word]:
for start_target in dict_word_in_seq_positions[(word,target)]:
offset_target_query=start_query-start_target
if self.check_contained(d2,target,start_query,offset_target_query):
for word2,start_query2 in dict_word_combs[start_query]:
#for word2,start_query2 in dict_word_combs[start_query,target]:########ALSO ADD OFFSET HERE TO RETURN ONLY ONE!!!!
offset=start_query2-start_query
#target_word=target[start_target+offset:start_target+offset+3]
end=offset+start_target
if target[end:end+3]==word2:
                                if query is not None:
temp=self.findHSP_NEW(target,query,start_target,end,start_query,start_query2,X)
else:
temp=self.findHSP_NEW_PSSM(target,pssm,start_target,end,start_query,start_query2,X)
#save hsp
if temp[3]>0:
#d2[(target,offset_target_query)]=temp[0]+temp[2]+2
d2[(target,temp[0]-temp[1])]=(temp[0]+temp[2]+2)
#min score
if temp[3]>=S:
if target in d:
existing_tuples=d[target]
else:
existing_tuples=[]
if not temp in existing_tuples:
existing_tuples.append(temp)
d[target]=existing_tuples
break
return d
def findHSP_NEW(self,target,query,start_target,start_target2,start_query,start_query2,X):
#score for first 3 AAs
#score=self.sub_matrix[AA_TO_INT[target[start_target2]]][AA_TO_INT[query[start_query2]]]+self.sub_matrix[AA_TO_INT[target[start_target2+1]]][AA_TO_INT[query[start_query2+1]]]+self.sub_matrix[AA_TO_INT[target[start_target2+2]]][AA_TO_INT[query[start_query2+2]]]
score=self.word_score[(query[start_query2:start_query2+3],target[start_target2:start_target2+3])]
#extend left
current_index_query=start_query2-1
current_index_target=start_target2-1
maxscore=score
index_of_max_score_left_query=start_query2
index_of_max_score_left_target=start_target2
#reached=False
#while True:
while ((current_index_target>=0) and (current_index_query >= 0)) and (score>(maxscore-X)):
score+=self.sub_matrix[AA_TO_INT[target[current_index_target]]][AA_TO_INT[query[current_index_query]]]
if score > maxscore:
maxscore=score
index_of_max_score_left_query=current_index_query
index_of_max_score_left_target=current_index_target
current_index_target-=1
current_index_query-=1
if current_index_query<=start_query+1:
#if reached:
#extend right
current_index_query=start_query2+3
current_index_target=start_target2+3
score=maxscore
index_of_max_score_right_query=start_query2+2
index_of_max_score_right_target=start_target2+2
len_target=len(target)-1
len_query=len(query)-1
while (current_index_target<=(len_target)) and (current_index_query <= (len_query)) and (score>(maxscore-X)):
score+=self.sub_matrix[AA_TO_INT[target[current_index_target]]][AA_TO_INT[query[current_index_query]]]
if score > maxscore:
maxscore=score
index_of_max_score_right_query=current_index_query
index_of_max_score_right_target=current_index_target
current_index_target+=1
current_index_query+=1
#if target[0:5]=="MNFAE" and False:
# print("Score:",score, "maxScore",maxscore,"current_index_target",current_index_target,"current_index_query",current_index_query)
else:
return (0,0,0,0)
return (index_of_max_score_left_query,index_of_max_score_left_target,index_of_max_score_right_target-index_of_max_score_left_target+1,maxscore)
def findHSP_NEW_PSSM(self,target,pssm,start_target,start_target2,start_query,start_query2,X):
#score for first 3 AAs
#score=pssm[start_query2][AA_TO_INT[target[start_target2]]]
#score+=pssm[start_query2+1][AA_TO_INT[target[start_target2+1]]]
#score+=pssm[start_query2+2][AA_TO_INT[target[start_target2+2]]]
score=pssm[start_query2][AA_TO_INT[target[start_target2]]]+pssm[start_query2+1][AA_TO_INT[target[start_target2+1]]]+pssm[start_query2+2][AA_TO_INT[target[start_target2+2]]]
#extend left
current_index_query=start_query2-1
current_index_target=start_target2-1
maxscore=score
index_of_max_score_left_query=start_query2
index_of_max_score_left_target=start_target2
reached=False
while True:
#test index left
if ((current_index_target<0) or (current_index_query <0)):
break
#test max score - X
if (score<=(maxscore-X)):
break
#calc score for current index
if current_index_query<=start_query+2:
reached = True
#score+=self.sub_matrix[AA_TO_INT[target[current_index_target]]][AA_TO_INT[query[current_index_query]]]
score+=pssm[current_index_query][AA_TO_INT[target[current_index_target]]]
if score > maxscore:
maxscore=score
index_of_max_score_left_query=current_index_query
index_of_max_score_left_target=current_index_target
current_index_target-=1
current_index_query-=1
if reached:
#extend right
current_index_query=start_query2+3
current_index_target=start_target2+3
score=maxscore
index_of_max_score_right_query=start_query2+2
index_of_max_score_right_target=start_target2+2
while True:
#test index right
if ((current_index_target>(len(target)-1)) or (current_index_query > (pssm.shape[0]-1))):
break
#test max score - X
if (score<=(maxscore-X)):
break
#calc score for current index
# score+=self.sub_matrix[AA_TO_INT[target[current_index_target]]][AA_TO_INT[query[current_index_query]]]
score+=pssm[current_index_query][AA_TO_INT[target[current_index_target]]]
if score > maxscore:
maxscore=score
index_of_max_score_right_query=current_index_query
index_of_max_score_right_target=current_index_target
current_index_target+=1
current_index_query+=1
#if target[0:5]=="MNFAE" and False:
# print("Score:",score, "maxScore",maxscore,"current_index_target",current_index_target,"current_index_query",current_index_query)
else:
return (0,0,0,0)
return (index_of_max_score_left_query,index_of_max_score_left_target,index_of_max_score_right_target-index_of_max_score_left_target+1,maxscore)
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.msa = sequences
# Check number of sequences
at_least_one_seq = len(self.msa) >= 1
# Check for same seq length
same_seq_length = all(len(seq) == len(self.msa[0]) for seq in self.msa)
# Check if the MSA is valid (using the provided alphabet)
valid_msa = True
for seq in self.msa:
valid_msa = valid_msa & all(aa_gap in ALPHABET for aa_gap in seq)
if not (at_least_one_seq and same_seq_length and valid_msa):
raise TypeError("The given MSA is not valid. Check that sequences only contain valid Amino acids and gap characters")
else:
self.seq_matrix = np.array([ list(seq) for seq in self.msa ])
self.pssm = np.zeros(self.get_size(), np.float64)
    def __get_bg_frequencies(self, bg_matrix, aa):
row_sums = np.sum(bg_matrix, axis=1)
_aa = INT_TO_AA[aa]
return row_sums[AA_TO_INT[_aa]]
def __get_aa_count(self):
sums = []
for c in list(ALPHABET):
sums.append(np.count_nonzero(self.seq_matrix == c, axis=0))
pre_pssm = np.array([ s for s in sums ], np.float64)
self.pssm = pre_pssm.transpose()
return self.pssm
def __get_aa_relative_frequencies(self, matrix):
row_sums = np.sum(matrix, axis=1)
self.pssm = matrix / row_sums[:, np.newaxis]
return self.pssm
def __get_aa_divided_by_background_frequencies(self, matrix, bg_matrix):
self.pssm = matrix
        if bg_matrix is None:
self.pssm = matrix / 0.05
else:
for i in range(0, self.pssm.shape[1]) :
                self.pssm[:, i] = matrix[:, i] / self.__get_bg_frequencies(bg_matrix, i)
return self.pssm
def __get_aa_log_score(self, matrix):
self.pssm = 2 * np.log2(matrix)
self.pssm[np.isneginf(self.pssm)] = -20
return self.pssm
    def __redistribute_gaps(self, matrix, bg_matrix):
        self.pssm = matrix
        # NB: a uniform background frequency of 0.05 is used here regardless of
        # bg_matrix; per-residue background frequencies are not applied.
        bg_f = 0.05
        p = matrix[:, self.pssm.shape[1] - 1] * bg_f
        np.copyto(self.pssm[:, self.pssm.shape[1] - 1], p)
for row in range(0, matrix.shape[0]):
for col in range(0, matrix.shape[1]-1):
self.pssm[row, col] = matrix[row, col] + p[row]
return self.pssm
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# Amino acid counts
self.pssm = self.__get_aa_count()
# Redistribute gaps
        if redistribute_gaps:
self.pssm = self.__redistribute_gaps(self.pssm, bg_matrix)
# Remove gap column
self.pssm = np.delete(self.pssm, self.pssm.shape[1]-1, 1)
# Get amino acid relative frequencies
self.pssm = self.__get_aa_relative_frequencies(self.pssm)
# Divide by background frequencies
self.pssm = self.__get_aa_divided_by_background_frequencies(self.pssm, bg_matrix)
# Get Log-score
self.pssm = self.__get_aa_log_score(self.pssm)
# Remove gap rows
primary_seq_list = list(self.msa[0])
to_delete = [i for i, e in enumerate(primary_seq_list) if e == '-']
self.pssm = np.delete(self.pssm, to_delete, 0)
return np.rint(self.pssm).astype(np.int64)
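    # Editor's worked example: with uniform background 0.05, an amino acid with
    # relative frequency 0.4 in a column scores 2 * log2(0.4 / 0.05) = 6, while
    # a frequency of 0 would give -inf and is clamped to -20 above.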
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
no_seq = len(self.msa)
msa_length = len(self.msa[0])
return (no_seq, msa_length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
seq = self.msa[0].replace("-", "")
return seq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
seq_w_m = np.zeros(self.get_size(), np.float64)
r = []
for column in self.seq_matrix.T:
r.append(len(np.unique(column)))
row_idx = 0
col_idx = 0
for col_idx in range(0, self.seq_matrix.shape[1]):
for row_idx in range(0, self.seq_matrix.shape[0]):
p = self.seq_matrix[row_idx, col_idx]
u, c = np.unique(self.seq_matrix[:,col_idx], return_counts = True)
uc = dict(zip(u,c))
w = 1/(uc[p] * r[col_idx])
seq_w_m[row_idx, col_idx] = w
seq_w_m2 = np.insert(seq_w_m, seq_w_m.shape[0], r, axis = 0).transpose()
for row in seq_w_m2:
if row[-1] == 1:
remove = np.where(np.all(seq_w_m2 == row, axis = 1))
for row in remove:
seq_w_m3 = np.delete(seq_w_m2, row, axis = 0)
seq_w_m4 = np.delete(seq_w_m3, seq_w_m3.shape[1]-1, 1)
weights = np.sum(seq_w_m4, axis=0)
return weights.astype(np.float64)
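    # Editor's worked example: in an MSA column (A, A, C) there are r = 2
    # distinct residues, so each A is weighted 1/(2*2) = 0.25 and C is
    # weighted 1/(2*1) = 0.5; columns with r == 1 are excluded above.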
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = -1
r = []
for column in self.seq_matrix.T:
r.append(len(np.unique(column)))
num_obs = np.array(sum(r)/len(r), np.float64)
return num_obs.astype(np.float64)<file_sep>##############
# Exercise 1.6
##############
CHARGED = ['H', 'K', 'D', 'E', 'R']
POSITIVE = ['H', 'K', 'R']
NEGATIVE = ['D', 'E']
HYDROPHOBIC = ['F', 'L', 'I', 'M', 'V', 'A', 'Y', 'W']
AROMATIC = ['F', 'Y', 'H', 'W']
POLAR = ['D', 'E', 'R', 'H', 'K', 'N', 'Q', 'S', 'T', 'Y']
PROLINE = ['P']
SULFUR = ['M', 'C']
ACIDIC = ['D', 'E']
BASIC = ['H', 'R', 'K']
def isCharged(aa):
return aa in CHARGED
def isPositivelyCharged(aa):
return aa in POSITIVE
def isNegativelyCharged(aa):
return aa in NEGATIVE
def isHydrophobic(aa):
return aa in HYDROPHOBIC
def isAromatic(aa):
return aa in AROMATIC
def isPolar(aa):
return aa in POLAR
def isProline(aa):
return aa in PROLINE
def containsSulfur(aa):
return aa in SULFUR
def isAcid(aa):
return aa in ACIDIC
def isBasic(aa):
return aa in BASIC
<file_sep>import pandas as pd
import sys
ex=sys.argv[1]
students=dict()
all_students = pd.read_csv('../additional_info/student_lists/allstudents.csv', sep=',', header=None)
for s in all_students.values:
    students.update({s[0]: s[1]})
agg=''
students_1=pd.read_csv('students_'+str(ex)+'.csv', sep=',',header=None)
for s in students_1.values:
agg+=students[s[0]]
agg+=','
agg=agg[:-1]
print(agg)
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from lib import codons_to_aa
from lib import stop_symbol
from lib import start_symbol
from lib import complementary
def get_orfs(genome):
if not all(['TCAG'.find(x) != -1 for x in genome]):
raise TypeError("not a valid DNA sequence")
sequences = []
orig_length = len(genome)
    genome = genome*3  # 3 copies so we always translate a valid aa-sequence and can read circular genomes
#all sequences
sequences.append(genome)
sequences.append(genome[1:]+genome[:1])
sequences.append(genome[2:]+genome[:2])
#reverse sequences
sequences.append(complementary(sequences[0])[::-1])
sequences.append(complementary(sequences[1])[::-1])
sequences.append(complementary(sequences[2])[::-1])
# convert to aa
aa_sequences = list(map(codons_to_aa,sequences))
orfs = []
for seq_idx, seq in enumerate(aa_sequences):
start_found = False
start_idx = 0
for idx, acid in enumerate(seq):
if(idx > orig_length/3 and not start_found ):
break
if(start_found and acid == stop_symbol):
start_found = False
if(idx-start_idx> 33):
if(seq_idx < 3):
orfs.append((start_idx*3+seq_idx, (idx*3+2+seq_idx)%orig_length, seq[start_idx: idx], False))
else:#reverse sequences
start = orig_length-(start_idx*3-2 +seq_idx)
end = orig_length - (idx*3 +seq_idx)
if end <0 :
start = start - end
end = 0
orfs.append((start, end, seq[start_idx: idx], True))
elif(not start_found and acid == start_symbol):
start_found = True
start_idx = idx
orfs.sort(key= lambda tup: (tup[1], tup[0]))
    without_short = []  # remove short ORFs that are included because of circular dependencies
for orf in orfs:
remove_idx = -1
for idx,long_orfs in enumerate(without_short):
if long_orfs[1] == orf[1] and long_orfs[3] == orf[3]:
remove_idx = idx
break
if remove_idx != -1:
del without_short[remove_idx]
without_short.append(orf)
return without_short<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import aa_dict
def get_triplets(genome):
triplets_list = []
for i in range(int(len(genome)/3)):
triplets_list.append(genome[i*3:i*3 + 3])
return triplets_list
def get_reverse_complement(genome):
return "".join([aa_dict.complements[aa] for aa in genome])[::-1]
def read_frames(genome):
# Primary
frame1 = get_triplets(genome)
frame2 = get_triplets(genome[1:])
frame3 = get_triplets(genome[2:])
# Reverse Complements
reverse_complement = get_reverse_complement(genome)
frame4 = get_triplets(reverse_complement)
frame5 = get_triplets(reverse_complement[1:])
frame6 = get_triplets(reverse_complement[2:])
return (frame1, frame2, frame3, frame4, frame5, frame6)
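# Editor's note: read_frames yields the six reading frames (three forward,
# three on the reverse complement) as codon-triplet lists; e.g. for 'ATGAAA'
# frame1 is ['ATG', 'AAA'].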
def translate(orf):
translated = ""
for triplet in orf[:-1]:
translated += aa_dict.RNA[triplet]
return translated
def get_orfs(genome):
for AA in genome:
if AA not in aa_dict.complements:
raise TypeError
#print ("Genome[1121:1121+3] = {}".format(genome[1121:1121+3]))
frames = read_frames(genome)
start_stop_included = False
for frame in frames:
if "ATG" in frame:
if "TAA" in frame or "TAG" in frame or "TGA" in frame:
start_stop_included = True
if not start_stop_included:
raise TypeError
dna_list = []
# Normal order
frame_number = -1
for frame in frames:
frame_number+=1
start = -1
start_list = []
triplet_index = -1
frame += frame
for aa_triplet in frame:
triplet_index += 1
# Search for start
if aa_triplet == "ATG" and start == -1:
start = triplet_index
# If start is found search for stop
if start >= 0:
if (aa_triplet == "TAA" or aa_triplet == "TAG" or aa_triplet == "TGA"):
stop = triplet_index + 1
start_index = 0
stop_index = 0
# for primary strand
if frame_number < 3:
start_index = (start*3 + frame_number) % (len(frame)*3/2)
stop_index = (stop*3 + frame_number - 1) % (len(frame)*3/2)
else:
start_index = (len(genome) - (start*3 + (frame_number-3)) -1) % ((len(frame)*3.0)/2)
stop_index = (len(genome) - (stop*3 +(frame_number-3) -1) -1) % ((len(frame)*3.0)/2)
dna_list.append((int(start_index), int(stop_index), frame[start:stop], frame_number > 2, frame_number))
start = -1
new_dna_list = []
for dna in dna_list:
if len(dna[2]) > 34:
#if True:
#print (dna)
dna = dna[0], dna[1], translate(dna[2]), dna[3]
new_dna_list.append(dna)
# Find start in reversed
return new_dna_list
<file_sep>import numpy as np
import json
from itertools import product
from pathlib import Path
##
def json_data():
test_json = 'tests/blast_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
return json_data
def db_sequences(json_data):
return json_data['db_sequences']
def db_stats(json_data):
return (json_data['db_stats'])
def db_seqs_for_word(json_data):
return json_data['db_seqs_for_word']
def sub_matrix(json_data):
return np.array(json_data['sub_matrix'], dtype=np.int64)
def query_seq(json_data):
return json_data['query_seq']
def query_pssm(json_data):
return np.array(json_data['query_pssm'], dtype=np.int64)
def blast_words(json_data):
return json_data['blast_words']
def blast_words_pssm(json_data):
return json_data['blast_words_pssm']
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.database = []
self.all_words = list(''.join(p) for p in product(ALPHABET, repeat=3))
self.word_seq = {}
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.database.append(sequence)
for word in self.all_words:
if word in sequence:
if word not in self.word_seq:
self.word_seq[word] = []
self.word_seq[word].append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
if word in self.word_seq:
return self.word_seq[word]
for seq in self.database:
if word in seq:
if word not in self.word_seq:
self.word_seq[word] = []
self.word_seq[word].append(seq)
if word in self.word_seq:
            return self.word_seq[word]
else:
return []
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
table = {}
unique_words = set()
avg_words = 0.0
for seq in self.database:
table[seq] = set()
for i in range(len(seq) - 2):
unique_words.add(seq[i:i + 3])
table[seq].add(seq[i:i + 3])
avg_words += len(table[seq])
n_seqs = len(self.database)
avg_words /= n_seqs
n_words = len(unique_words)
avg_seqs = sum([len(table[seq]) for seq in self.database]) / n_words
return (n_seqs, n_words, round(avg_words), round(avg_seqs))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sm = substitution_matrix
self.all_words = list(''.join(p) for p in product(ALPHABET, repeat=3))
self.w2i = {w: index for index, w in enumerate(self.all_words)}
self.i2w = {index: w for index, w in enumerate(self.all_words)}
self.cache = np.ones((len(self.all_words), len(self.all_words))) * -5000
self.query_cache = []
self.query_cache_pssm = []
self.words = {}
self.rk = {}
self.ws_match = {}
self.count = 0
self.wi = {}
def _get_score(self, word1, word2, query):
if query:
if self.cache[self.w2i[word1], self.w2i[word2]] != -5000:
return self.cache[self.w2i[word1], self.w2i[word2]]
score = 0
for i in range(len(word1)):
score += self.sm[AA_TO_INT[word1[i]], AA_TO_INT[word2[i]]] if query else word2[i, AA_TO_INT[word1[i]]]
if query:
self.cache[self.w2i[word1], self.w2i[word2]] = score
return score
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
"""
if sequence:
if len(self.query_cache) > 0:
return self.query_cache
words = set()
for word in self.all_words:
for i in range(len(sequence or pssm) - 2):
word2 = (sequence or pssm)[i:i + 3]
score = self._get_score(word, word2, sequence)
if score >= T:
words.add(word)
return list(words)
"""
words = set()
for word in self.all_words:
for i in range(len(sequence or pssm) - 2):
word2 = (sequence or pssm)[i:i + 3]
score = self._get_score(word, word2, sequence)
if score >= T:
words.add(word)
return list(words)
def _find_hsp(self, start_q, start_s, score, query, pssm, seq, X):
length = 3
all_hsps = [(start_q, start_s, length, score)]
max_score = score
# right
limit = min(len(query or pssm) - (start_q + 1), len(seq) - (start_s + 1)) + 1
for i in range(3, limit):
length += 1
score += self.sm[AA_TO_INT[seq[start_s + i]], AA_TO_INT[query[start_q + i]]] if query else pssm[start_q + i, AA_TO_INT[seq[start_s + i]]]
all_hsps.append((start_q, start_s, length, int(score)))
if score > max_score:
max_score = score
if score <= max_score - X:
break
all_hsps = [hsp for hsp in all_hsps if hsp[3] == max_score]
all_hsps = sorted(all_hsps, key=lambda x: x[2])
hsp = all_hsps[0]
length = hsp[2]
all_hsps = []
all_hsps.append(hsp)
score = max_score
# left
limit = min(start_q, start_s) + 1
for i in range(1, limit):
length += 1
score += self.sm[AA_TO_INT[seq[start_s - i]], AA_TO_INT[query[start_q - i]]] if query else pssm[start_q - i, AA_TO_INT[seq[start_s - i]]]
all_hsps.append((start_q - i, start_s - i, length, int(score)))
if score > max_score:
max_score = score
if score <= max_score - X:
break
all_hsps = [hsp for hsp in all_hsps if hsp[3] == max_score]
all_hsps = sorted(all_hsps, key=lambda x: x[2])
hsp = all_hsps[0]
return hsp
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
        tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
        :return: dictionary of target sequences and list of HSP tuples.
"""
res = {}
words = self.get_words(sequence=query, pssm=pssm, T=T)
wi = self.get_wi(query, pssm, T, words)
for word in words:
for i in range(len(query or pssm) - 2):
word2 = (query or pssm)[i:i + 3]
score = self._get_score(word, word2, query)
if score >= T:
start_q = i
for seq in blast_db.get_sequences(word):
for j in range(len(seq) - 2):
if word == seq[j:j + 3]:
start_s = j
hsp = self._find_hsp(start_q, start_s, score, query, pssm, seq, X)
if hsp[3] >= S:
if seq not in res:
res[seq] = []
if hsp not in res[seq]:
res[seq].append(hsp)
return res
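    # Editor's illustration (hedged): a typical call, assuming db is a
    # populated BlastDb and 'MGPRARPAFLLLMLLQTAVL' is a made-up query:
    #   hsps = blast.search_one_hit(db, query='MGPRARPAFLLLMLLQTAVL', T=13, X=5, S=30)
    # mapping each matched target sequence to its list of
    # (query_start, target_start, length, score) tuples.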
def _find_hsp_2(self, hit_l, hit_r, dist, query, pssm, seq, X):
#(24, 103, 13), (35, 114, 14)
#print(hit_l, hit_r)
length = 3
start_q_l, start_s_l, score_l = hit_l
start_q_r, start_s_r, score_r = hit_r
all_hsps = []
all_hsps.append((start_q_r, start_s_r, length, score_r))
#print(all_hsps)
max_score_r = score_r
#print(start_q_r, start_s_r)
# left
limit = min(start_q_r, start_s_r) + 1
for i in range(1, limit):
#for i in range(1, dist - 2):
length += 1
score_r += self.sm[AA_TO_INT[seq[start_s_r - i]], AA_TO_INT[query[start_q_r - i]]] if query else pssm[start_q_r - i, AA_TO_INT[seq[start_s_r - i]]]
all_hsps.append((start_q_r - i, start_s_r - i, length, score_r))
all_hsps = sorted(all_hsps, key=lambda x: (-x[3], x[2]))
if score_r > max_score_r:
max_score_r = score_r
#print(all_hsps)
if score_r <= max_score_r - X:
if start_q_r - i <= start_q_l + 2:
break
else:
return ()
#print(all_hsps)
#all_hsps = [hsp for hsp in all_hsps if hsp[3] == max_score_r]
#all_hsps = sorted(all_hsps, key=lambda x: x[2])
hsp = all_hsps[0]
score = max_score_r# + score_l
all_hsps = [hsp]
length = hsp[2]
max_score = score
#print(all_hsps)
start_s = hsp[1] + hsp[2] - 3
start_q = hsp[0] + hsp[2] - 3
# right
limit = min(len(query or pssm) - (start_q + 1), len(seq) - (start_s + 1)) + 1
for i in range(3, limit):
length += 1
score += self.sm[AA_TO_INT[seq[start_s + i]], AA_TO_INT[query[start_q + i]]] if query else pssm[start_q + i, AA_TO_INT[seq[start_s + i]]]
all_hsps.append((hsp[0], hsp[1], length, score))
all_hsps = sorted(all_hsps, key=lambda x: (-x[3], x[2]))
if score > max_score:
max_score = score
if score <= max_score - X:
break
#print(all_hsps)
###print(max_score)
#all_hsps = [hsp for hsp in all_hsps if hsp[3] == max_score]
#all_hsps = sorted(all_hsps, key=lambda x: x[2])
hsp = all_hsps[0]
###print(hsp)
return hsp
def included(self, start_q_l, start_q_r, start_s_l, start_s_r, start_q, start_s, l):
        q_overlap = ((start_q_l + 3 > start_q and start_q_l + 3 < start_q + l)
                     or (start_q_r < start_q + l and start_q_r + 3 > start_q))
        s_overlap = ((start_s_l + 3 > start_s and start_s_l + 3 < start_s + l)
                     or (start_s_r < start_s + l and start_s_r + 3 > start_s))
        same_diag_l = abs(start_q - start_s) == abs(start_q_l - start_s_l)
        same_diag_r = abs(start_q - start_s) == abs(start_q_r - start_s_r)
        return (q_overlap and same_diag_l) or (s_overlap and same_diag_r)
def rabin_karp(self, pattern, text):
p_len = 3
p_hash = hash(pattern) #for pattern in patterns]
for i in range(0, len(text) - (p_len - 1)):
# written like this t
text_hash = hash(text[i:i + p_len])
if text_hash == p_hash and text[i:i + p_len] == pattern:
yield i
def rabin_karp2(self, patterns, text):
p_len = 3
p_hash = [hash(pattern) for pattern in patterns]
for i in range(0, len(text) - (p_len - 1)):
# written like this t
text_hash = hash(text[i:i + p_len])
if text_hash in p_hash and text[i:i + p_len] in patterns:
yield i
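    # Note: the two helpers above approximate Rabin-Karp matching with
    # Python's built-in hash() instead of a true rolling hash, so every
    # window is hashed from scratch; the final direct string comparison
    # guards against hash collisions. rabin_karp yields the positions of a
    # single 3-mer, rabin_karp2 the positions of any 3-mer from a pattern set.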
def get_wi(self, query, pssm, T, words):
        if (query, T) in self.wi:
            return self.wi[(query, T)]
if query:
m = np.zeros((3, len(query), len(words)))
for i in range(3):
for j in range(len(query)):
for k in range(len(words)):
m[i, j, k] = self.sm[AA_TO_INT[words[k][i]], AA_TO_INT[query[j]]]
#m = np.fromfunction(lambda i, j, k: self.sm[int(AA_TO_INT[words[k][i]]), int(AA_TO_INT[query[j]])], (3, len(query), len(words)))
else:
m = np.zeros((3, len(pssm), len(words)))
for i in range(3):
for j in range(len(pssm)):
for k in range(len(words)):
m[i, j, k] = pssm[j, AA_TO_INT[words[k][i]]]
wi = {}
for k in range(m.shape[2]):
for o in range(0, m.shape[1] - 2):
s = np.trace(m[:, :, k], offset=o)
if s >= T:
if words[k] not in wi:
wi[words[k]] = []
wi[words[k]].append((o, s))
self.wi[(query, T)] = wi
return wi
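    # get_wi builds, for every high-scoring word, the query offsets at which
    # it scores >= T: m[i, j, k] is the score of letter i of word k against
    # query position j (or the PSSM entry), so the diagonal trace with offset
    # o is exactly the 3-mer score of word k anchored at query offset o.
    # Results are memoised in self.wi, keyed by (query, T).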
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
        tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
        :return: dictionary of target sequences and list of HSP tuples.
"""
res = {}
words = self.get_words(sequence=query, pssm=pssm, T=T)
wi = self.get_wi(query, pssm, T, words)
#print(wi)
candidates = []
for seq in blast_db.database:
#for word in words:
#for seq in blast_db.get_sequences(word):
#tick = time.time()
#hits = list(self.rabin_karp(word, seq))
#print(time.time() -tick)
#tick = time.time()
hits = list(self.rabin_karp2(words, seq))
#print(time.time() -tick)
for hit_l in hits:
for dist in range(3, min(A + 1, len(seq) - hit_l - 2)):
if hit_l + dist in hits:
word_l = seq[hit_l:hit_l + 3]
hit_r = hit_l + dist
word_r = seq[hit_r:hit_r + 3]
if word_l in wi and word_r in wi:
for i, score_l in wi[word_l]:
for j, score_r in wi[word_r]:
if j - i == dist:
#if i + dist >= 0 and i + dist + 3 < len(query or pssm):
#word_r = seq[hit_l + dist: hit_l + dist + 3]
#word2_r = (query or pssm)[i + dist:i + dist + 3]
#score_r = self._get_score(word_r, word2_r, query)
#if score_r >= T and word_r in words:
#j = i + dist
#hit_r = hit_l + dist
left = (i, hit_l, score_l)
right = (j, hit_r, score_r)
#print(left, right)
check = True
if seq in res:
for hsp in res[seq]:
#if left == (27, 24, 16.0):
#print(i, j, hit_l, hit_r, hsp[0], hsp[1], hsp[2])
if self.included(i, j, hit_l, hit_r, hsp[0], hsp[1], hsp[2]):
check = False
break
if check:
hsp = self._find_hsp_2(left, right, dist, query, pssm, seq, X)
if len(hsp) > 0:
if seq not in res:
res[seq] = []
res[seq].append(hsp)
f = {}
for seq in res:
res[seq] = list(set(res[seq]))
res[seq] = [hsp for hsp in res[seq] if hsp[3] >= S]
if len(res[seq]) > 0:
f[seq] = res[seq]
return f
if __name__ == "__main__":
blast = Blast(sub_matrix(json_data()))
blast_db = BlastDb()
for s in db_sequences(json_data()):
blast_db.add_sequence(s)
##print(blast_db.get_db_stats())
##print(db_stats(json_data()))
blast = Blast(sub_matrix(json_data()))
words = blast.get_words(sequence=query_seq(json_data()), T=13)
##print(len(words))
##print(len(blast_words(json_data())))
#words = blast.get_words(pssm=query_pssm(json_data()), T=11)
###print(len(words))
###print(len(blast_words_pssm(json_data())))
blast = Blast(sub_matrix(json_data()))
blast_db = BlastDb()
for s in ["<KEY>IL<KEY>"]: #["<KEY>DLCIGPAKCAPNNREGYNGYTGAFQCLVEKGDVAFVKHQTVLENTNGKNTAAWAKDLKQEDFQLLCPDGTKKPVTEFATCHLAQAPNHVVVSRKEKAARVSTVLTAQKDLFWKGDKDCTGNFCLFRSSTKDLLFRDDTKCLTKLPEGTTYEEYLGAEYLQAVGNIRKCSTSRLLEACTFHKS"]:
blast_db.add_sequence(s)
results = blast.search_two_hit(blast_db, query="MVATGLFVGLNKGHVVTKREQPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK", T=11, X=5, S=30, A=40)
#print(results)
#results = blast.search_two_hit(blast_db, pssm=query_pssm(json_data()), T=11, X=5, S=30, A=40)
print(results)
seq="MEEDIDTRKINNSFLRDHSYATEADIISTVEFNHTGELLATGDKGGRVVIFQREQESKNQVHRRGEYNVYSTFQSHEPEFDYLKSLEIEEKINKIRWLPQQNAAYFLLSTNDKTVKLWKVSERDKRPEGYNLKDEEGRLRDPATITTLRVPVLRPMDLMVEATPRRVFANAHTYHINSISVNSDYETYMSADDLRINLWNFEITNQSFNIVDIKPANMEELTEVITAAEFHPHHCNTFVYSSSKGTIRLCDMRASALCDRHTKFFEEPEDPSNRSFFSEIISSISDVKFSHSGRYIMTRDYLTAKVWDLNMENRPVETYQVHDYLRSKLCSLYENDCIFDKFECVWNGSDSVIMTGSYNNFFRMFDRNTKRDVTLEASRENSKPRAILKPRKVCVGGKRRKDEISVDSLDFSKKILHTAWHPSENIIAVAATNNLYIFQDKVN"
query="MVATGLFVGLNKGHVVTKREQPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK"
###print(query[74: 74+26])
score = 0
start_q = 19
start_seq = 265
for i in range(0, 10, 1):
score += sub_matrix(json_data())[AA_TO_INT[query[start_q + i]], AA_TO_INT[seq[start_seq + i]]]
#print(score)
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
WORD_LIST = []
for a in ALPHABET:
    for b in ALPHABET:
        for c in ALPHABET:
            WORD_LIST.append(a + b + c)
WORD_TO_INT = {w:index for index, w in enumerate(WORD_LIST)}
INT_TO_WORD = {index:w for index, w in enumerate(WORD_LIST)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences=[]
self.num_seq_per_word = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
result = [seq for seq in self.sequences if word in seq]
return result
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
    :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_seq = len(self.sequences)
unique_word = set()
num_words_per_seq = np.zeros((num_seq))
# store the candidate sequences where the word i occurs
self.num_seq_per_word = np.zeros((len(WORD_LIST),num_seq))
for s,seq in enumerate(self.sequences):
words = set()
for i in range(0,len(seq),3):
w1 = seq[i:i+3]
w2 = seq[i+1:i+4]
w3 = seq[i+2:i+5]
if len(w1)==3:
words.add(w1)
unique_word.add(w1)
self.num_seq_per_word[WORD_TO_INT[w1],s] = 1
if len(w2)==3:
words.add(w2)
unique_word.add(w2)
self.num_seq_per_word[WORD_TO_INT[w2],s] = 1
if len(w3)==3:
words.add(w3)
unique_word.add(w3)
self.num_seq_per_word[WORD_TO_INT[w3],s] = 1
num_words_per_seq[s] = len(words)
num_words = len(unique_word)
avg_words_per_sequence = np.sum(num_words_per_seq)/num_seq
avg_seq_per_word = np.sum(self.num_seq_per_word)/num_words
return (num_seq, num_words, int(np.rint(avg_words_per_sequence)), int(np.rint(avg_seq_per_word)))
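    # Worked example (illustrative): for the two sequences "AAAA" and "AAAC"
    # the unique words are AAA (in both) and AAC (in one), giving the stats
    # (2 sequences, 2 words, rint((1+2)/2)=2 words/sequence,
    #  rint((2+1)/2)=2 sequences/word).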
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        # we need to store the position where the word aligned
        if sequence is not None:
uni_words = set()
for i in range(0,len(sequence)-2,1):
for w in WORD_LIST:
target = sequence[i:i+3]
score = self.substitution_matrix[AA_TO_INT[target[0]],AA_TO_INT[w[0]]]\
+self.substitution_matrix[AA_TO_INT[target[1]],AA_TO_INT[w[1]]]\
+self.substitution_matrix[AA_TO_INT[target[2]],AA_TO_INT[w[2]]]
if score>=T:uni_words.add(w)
else:
uni_words = set()
for i in range(0,len(pssm)-2,1):
for w in WORD_LIST:
score = pssm[i,AA_TO_INT[w[0]]]+pssm[i+1,AA_TO_INT[w[1]]]+pssm[i+2,AA_TO_INT[w[2]]]
if score >= T: uni_words.add(w)
return list(uni_words)
def get_words_with_position(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        # we need to store the position where the word aligned
        if sequence is not None:
uni_words = set()
for i in range(0,len(sequence)-2,1):
for w in WORD_LIST:
target = sequence[i:i+3]
score = self.substitution_matrix[AA_TO_INT[target[0]],AA_TO_INT[w[0]]]\
+self.substitution_matrix[AA_TO_INT[target[1]],AA_TO_INT[w[1]]]\
+self.substitution_matrix[AA_TO_INT[target[2]],AA_TO_INT[w[2]]]
if score>=T:uni_words.add((w,i,score))
else:
uni_words = set()
for i in range(0,len(pssm)-2,1):
for w in WORD_LIST:
score = pssm[i,AA_TO_INT[w[0]]]+pssm[i+1,AA_TO_INT[w[1]]]+pssm[i+2,AA_TO_INT[w[2]]]
if score >= T: uni_words.add((w,i,score))
return list(uni_words)
def find_all_word_index(self,seq, word):
"""
find all the index where the word appears in the sequence
:param seq:
:param word:
:return:
"""
index = []
for i in range(len(seq)):
if (seq[i:i + 3] == word): index.append(i)
return index
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query != None:
# sequence is provided
words = self.get_words_with_position(sequence=query,T = T)
for (w,p_in_query,start_score) in words:
# numpy array
# targets = blast_db.num_seq_per_word[WORD_TO_INT[w]]
# the word w exist in targets
# targets = np.where(targets==1)[0]
targets = blast_db.get_sequences(w)
for target in targets:
# target = blast_db.sequences[target_index]
p_in_targets = self.find_all_word_index(target,w)
for p_in_target in p_in_targets:
# the word w start in query at position p_in_query and start in target at position p_in_target
# start align and compute hsp score
max_score = start_score
score = start_score
# firstly start in direction right
i,j,stop_right_query,stop_right_target = p_in_query+3,p_in_target+3,p_in_query+2,p_in_target+2
while(i<len(query) and j<len(target) and max_score-score<X ):
# stop condition: 1, max-current>X or to the end of query or target
score = score + self.substitution_matrix[AA_TO_INT[query[i]],AA_TO_INT[target[j]]]
if score>max_score:
max_score = score
stop_right_query = i
stop_right_target = j
i = i + 1
j = j + 1
score = max_score
# in direction left
i, j, stop_left_query, stop_left_target = p_in_query-1, p_in_target-1, p_in_query , p_in_target
while (i >=0 and j >=0 and max_score - score < X):
# stop condition: 1, max-current>X or to the end of query or target
score = score + self.substitution_matrix[AA_TO_INT[query[i]], AA_TO_INT[target[j]]]
if score > max_score:
max_score = score
stop_left_query = i
stop_left_target = j
i = i - 1
j = j - 1
if max_score>=S:
if target in d.keys():
                            # remove duplicates: same HSP score
d[target].append((stop_left_query,stop_left_target,stop_right_target-stop_left_target+1,max_score))
else:d[target] = [(stop_left_query,stop_left_target,stop_right_target-stop_left_target+1,max_score)]
else:
# pssm is provided
N,_ = pssm.shape
words = self.get_words_with_position(pssm=pssm, T=T)
for (w, p_in_query, start_score) in words:
# numpy array
# the word w exist in targets
targets = blast_db.get_sequences(w)
for target in targets:
p_in_targets = self.find_all_word_index(target, w)
for p_in_target in p_in_targets:
# the word w start in query at position p_in_query and start in target at position p_in_target
# start align and compute hsp score
max_score = start_score
score = start_score
# firstly start in direction right
i, j, stop_right_query, stop_right_target = p_in_query + 3, p_in_target + 3, p_in_query + 2, p_in_target + 2
while (i < N and j < len(target) and max_score - score < X):
# stop condition: 1, max-current>X or to the end of query or target
score = score + pssm[i, AA_TO_INT[target[j]]]
# score = score + pssm[i, 0]
if score > max_score:
max_score = score
stop_right_query = i
stop_right_target = j
i = i + 1
j = j + 1
score = max_score
# in direction left
i, j, stop_left_query, stop_left_target = p_in_query - 1, p_in_target - 1, p_in_query, p_in_target
while (i >= 0 and j >= 0 and max_score - score < X):
# stop condition: 1, max-current>X or to the end of query or target
score = score + pssm[i, AA_TO_INT[target[j]]]
# score = score + pssm[i, 0]
# score = score + self.substitution_matrix[AA_TO_INT[query[i]], AA_TO_INT[target[j]]]
if score > max_score:
max_score = score
stop_left_query = i
stop_left_target = j
i = i - 1
j = j - 1
if max_score >= S:
if target in d.keys():
                            # remove duplicates: same HSP score
d[target].append((stop_left_query, stop_left_target,
stop_right_target - stop_left_target + 1, max_score))
else:
d[target] = [(stop_left_query, stop_left_target,
stop_right_target - stop_left_target + 1, max_score)]
for k in d.keys():
d[k] = set(d[k])
return d
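    # Note: both extension loops above implement the classic BLAST drop-off
    # rule: keep extending while (best score so far - current score) < X and
    # remember the positions that achieved the best score, so the reported
    # HSP boundaries are the maximising positions, not where the loop stopped.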
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # placeholder implementation returning a dummy HSP dictionary
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
if __name__ == '__main__':
import json
with open("/Users/wangyu/Documents/Protein_prediction/exercise/pp1ss19exercise5-exercise-ge56sen/tests/blast_test.json") as f:
json_data = json.load(f)
db_sequences = json_data['db_sequences']
blast_db = BlastDb()
for s in db_sequences:
blast_db.add_sequence(s)
# seqs_for_word = blast_db.get_sequences('DEF')
# print(len(set(seqs_for_word)))
# print(len(set(json_data['db_seqs_for_word'])))
# print("db_stats = (7104, 8000, 485, 431)")
# print(blast_db.get_db_stats())
substitution_matrix = np.array(json_data['sub_matrix'], dtype=np.int64)
blast = Blast(substitution_matrix)
# query_pssm = np.array(json_data['query_pssm'], dtype=np.int64)
# words = blast.get_words(pssm=query_pssm, T=11)
# blast_words_pssm = json_data['blast_words_pssm']
# print(len(words))
# print(len(blast_words_pssm))
query_seq=json_data['query_seq']
# results = blast.search_one_hit(blast_db,query=query_seq,T=13,X=5,S=30)
#
#
def table_list_tuple(data):
for key, value in data.items():
data[key] = [tuple(x) for x in value]
return data
# compare = table_list_tuple(json_data['blast_hsp_one_hit_1'])
# target = "MRLGKPKGGISRSASQGKTYESKRKTARQRQKWGVAIRFDSGLSRRRRNVDEKPYKCTKCSKSFSQSSTLFQHKKIHTGKKSHKCADCGKSFFQSSNLIQHRRIHTGEKPYKCDECGERFKQSSNLIQHQRIHTGEKPYCCDECGRCFSQSSHLIQHQRTHTGEKPYQCEECDKCFSQSSHLRQHMKVHKEKKSHKRGKNARAKTHPVSWKRGKGRKAVAGLRQVKGAASGLFKKKK"
#
# print(len(results))
# print(len(compare))
# #
# print(len(set(results)))
# print(len(set(compare)))
# print(compare.values())
# print(results.values())
# results = blast.search_one_hit(blast_db,
# query=query_seq,
# T=13,
# X=7,
# S=40)
#
# compare = table_list_tuple(json_data['blast_hsp_one_hit_2'])
# print(len(results))
# print(len(compare))
# #
# print(len(set(results)))
# print(len(set(compare)))
# print(results.values())
# print(compare.values())
# keys = list(results.keys())
# # print(keys)
# for key in keys:
#
# a = compare[key]
# b = results[key]
# if (set(a)!=set(b)):
# print("******************")
# print(key)
# print(a)
# print(b)
# target = "MVKISFQPAVAGVKAEKADKAAASGPASASAPAAEILLTPAREERPPRHRSRKGGSVGGVCYLSMGMVVLLMGLVFASVYIYRYFFLAQLARDNFFHCGVLYEDSLSSQIRTRLELEEDVKIYLEENYERINVPVPQFGGGDPADIIHDFQRGLTAYHDISLDKCYVIELNTTIVLPPRNFWELLMNVKRGTYLPQTYIIQEEMVVTEHVRDKEALGSFIYHLCNGKDTYRLRRRATRRRINKRGAKNCNAIRHFENTFVVETLICGVV"
# target = "MHTDLDTDMDADTETVALCSSSSRQASPSGTPTPEADTTLLKQKPEKLLAELDRGGPPPAPGVPRRRGSMPVPYKHQLRRAQAVDELDWPPQASSSGSSDSLGSGEAALAQKDGVFKVMLLGESGVGKSTLAGTFGGLQGDNAHEMENSEDTYERRIMVDKEEVTLIVYDIWEQGDAGGWLQDHCLQTGDAFLIVFSVTDRRSFSKVPETLLRLRAGRPHHDLPVILVGNKSDLARSREVSLEEGRHLAGTLSCKHIETSAALHHNTRELFEGAVRQIRLRRGRGHAGGQRPEPSSPDGPAPPTRRESLTKKAKRFLANLVPRNAKFFKQRSRSCHDLSVL"
# print(results[target])
# print(compare[target])
# print(query_seq[13:24])
# print(target[76:87])
# print(len(query_seq))
# print(len(target))
# print(blast.substitution_matrix[AA_TO_INT['R'],AA_TO_INT['D']])
# print(blast.substitution_matrix)
compare = json_data["blast_hsp_one_hit_pssm_1"]
query_pssm = np.array(json_data['query_pssm'], dtype=np.int64)
results = blast.search_one_hit(blast_db,
pssm=query_pssm,
T=13,
X=5,
S=30)
print(len(results))
    print(len(compare))
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.words_per_seq = 0
self.sequencesPerWords = dict()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
words_added = set()
for i in range(len(sequence)-2):
word = sequence[i:i+3]
if word not in self.sequencesPerWords:
self.sequencesPerWords[word] = [(sequence, i)]
words_added.add(word)
elif word not in words_added:
self.sequencesPerWords[word].append((sequence,i))
words_added.add(word)
self.words_per_seq += len(words_added)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [i[0] for i in self.sequencesPerWords[word]]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
words_in_seq = 0
for word in self.sequencesPerWords:
w = len(self.sequencesPerWords[word])
words_in_seq += w
number_of_seq = len(self.sequences)
number_of_diff_words_in_db = len(self.sequencesPerWords)
avg_words_per_seq = round(self.words_per_seq / len(self.sequences))
avg_seq_per_word = round(words_in_seq / number_of_diff_words_in_db)
return (number_of_seq, number_of_diff_words_in_db, avg_words_per_seq, avg_seq_per_word)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11, save_positions=False):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words = set()
if save_positions:
words = dict()
loop_length = 0
if sequence is not None:
loop_length = len(sequence) - 2
elif pssm is not None:
loop_length = len(pssm) - 2
if pssm is not None:
for i in range(loop_length):
for j in range(20):
for k in range(20):
for l in range(20):
perm_word = ALPHABET[j] + ALPHABET[k] + ALPHABET[l]
score = self.calculate_word_score(target=perm_word, pssm=pssm, pssm_row=i)
if score >= T:
if save_positions:
words[perm_word] = (i, score)
else:
words.add(perm_word)
if sequence is not None:
for i in range(loop_length):
word = sequence[i:i+3]
                # already found at an earlier position; skip re-scoring its permutations
if word in words:
continue
score = self.calculate_word_score(query=word, target=word)
if score >= T:
if save_positions:
words[word] = (i, score)
else:
words.add(word)
                # the word by itself barely reaches the required score => no need to permute, as all permutations score lower than the word itself
if score == T:
continue
for j in range(20):
for k in range(20):
for l in range(20):
perm_word = ALPHABET[j] + ALPHABET[k] + ALPHABET[l]
score_perm = self.calculate_word_score(query=word, target=perm_word)
if score_perm >= T:
if save_positions:
words[perm_word] = (i, score_perm)
else:
words.add(perm_word)
return words
def calculate_word_score(self, target, query=None, pssm=None, pssm_row=None):
score = 0
if query is not None:
for i in range(len(query)):
score += self.substitution_matrix[AA_TO_INT[query[i]]][AA_TO_INT[target[i]]]
elif pssm is not None:
for i in range(3):
score += pssm[pssm_row+i][AA_TO_INT[target[i]]]
return score
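    # Example (illustrative): with a substitution matrix whose diagonal is 4,
    # calculate_word_score(query='MGP', target='MGP') returns 12; the PSSM
    # branch instead sums pssm[pssm_row + i][AA_TO_INT[target[i]]] for i in 0..2.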
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query is not None:
words = self.get_words(sequence=query, pssm=pssm, T=T,save_positions=True)
for word in words:
word_pos, word_score = words[word]
sequences_with_word = blast_db.sequencesPerWords[word]
for target_seq, word_pos_in_target in sequences_with_word:
current_max_score = current_score = word_score
stop_index = word_pos+2
start_index = word_pos
# extend right
# i = pos in query
offset_to_target = word_pos_in_target - word_pos
for i in range(word_pos+3, len(query)):
if i + offset_to_target >= len(target_seq):
break
query_aa = query[i]
target_aa = target_seq[i+offset_to_target]
#print("position", i, "offset", offset_to_target, "query length:", len(query), "target length", len(target_seq))
current_score += self.substitution_matrix[AA_TO_INT[query_aa]][AA_TO_INT[target_aa]]
# update max score
if current_score > current_max_score:
current_max_score = current_score
stop_index = i
                    # stop once the score drops X or more below the best score seen so far
if current_score <= current_max_score - X:
break
# extend left
current_score = current_max_score
for i in range(word_pos-1, -1, -1):
current_score += self.substitution_matrix[AA_TO_INT[query[i]]][AA_TO_INT[target_seq[i+offset_to_target]]]
# update max score
if current_score > current_max_score:
current_max_score = current_score
start_index = i
                    # stop once the score drops X or more below the best score seen so far
if current_score <= current_max_score - X:
break
if current_max_score >= S:
if target_seq not in d:
d[target_seq] = {(start_index, start_index+offset_to_target, stop_index-start_index+1, current_max_score)}
else:
d[target_seq].add((start_index, start_index+offset_to_target, stop_index-start_index+1, current_max_score))
        # debug: print({k: d[k] for k in list(d)[:10]})
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # placeholder implementation returning a dummy HSP dictionary
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
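# A minimal usage sketch with made-up inputs (assumption: run standalone; the
# sequences and the identity-style matrix below are illustrative and are not
# the graded test data):
if __name__ == '__main__':
    demo_db = BlastDb()
    demo_db.add_sequence('MGPRARPAFLL')
    demo_db.add_sequence('MGARSMGP')
    demo_sub = np.full((20, 20), -1, dtype=np.int64)
    np.fill_diagonal(demo_sub, 5)
    demo_blast = Blast(demo_sub)
    print(demo_db.get_db_stats())                       # (2, 14, 8, 1)
    print(demo_blast.get_words(sequence='MGP', T=11))   # {'MGP'}
    print(demo_db.get_sequences('MGP'))                 # both sequences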
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in "RHK"
def isNegativelyCharged(aa):
return aa in "DE"
def isHydrophobic(aa):
return aa in "AVILMFYW"
def isAromatic(aa):
return aa in "YWFH"
def isPolar(aa):
return aa in "DERHKNQSTY"
def isProline(aa):
return aa in "P"
def containsSulfur(aa):
return aa in "CM"
def isAcid(aa):
return aa in "DE"
def isBasic(aa):
return aa in "RKH"
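# Minimal sanity check for the residue predicates above (illustrative only):
if __name__ == '__main__':
    assert isCharged('R') and isPositivelyCharged('R')
    assert isNegativelyCharged('D') and isAcid('D')
    assert isHydrophobic('L') and not isPolar('L')
    assert isAromatic('W') and containsSulfur('M') and isProline('P')
    print('all residue predicates behave as expected')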
<file_sep>import numpy as np
from functools import reduce
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.msa = sequences
self._validate_msa()
def _validate_msa(self):
if len(self.msa) == 0:
raise TypeError("MSA is empty")
the_len = len(self.msa[0])
if not all(len(seq) == the_len for seq in self.msa):
raise TypeError("Sequences have different lengths")
for msa in self.msa:
if not all(aa in ALPHABET for aa in msa):
raise TypeError("Some sequences has invalided character")
def get_pssm(self, *, bg_matrix=False, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
msa_len = len(self.msa[0]) # or len(self.get_primary_sequence())?
pssm = np.zeros((msa_len, 20))
gap_count = [0] * msa_len
# 1. Calculate sequence weights
seq_weights = self.get_sequence_weights()
# 2. Count (with weights) observed amino acids and gaps
for row in range(0, msa_len):
for seq_idx, seq in enumerate(self.msa):
aa = seq[row]
score = 1 if not use_sequence_weights else seq_weights[seq_idx]
if aa == '-':
gap_count[row] += score
else:
aa_idx = AA_TO_INT[aa]
pssm[row][aa_idx] += score
# Convert bg_matrix to vector
        # uniform background if no bg_matrix was given (avoid truth-testing an ndarray)
        aa_bg_vec = [0.05] * 20 if bg_matrix is False else np.sum(bg_matrix, axis=1)
# 3. Redistribute gaps according to background frequencies
if redistribute_gaps:
for row_idx, gap_freq in enumerate(gap_count):
for aa_idx, cell in enumerate(pssm[row_idx]):
pssm[row_idx][aa_idx] += gap_freq * aa_bg_vec[aa_idx]
# TODO 4. Add weighted pseudocounts
if add_pseudocounts:
pass
for row_idx, row in enumerate(pssm):
row_sum = np.sum(row)
for aa_idx, aa_value in enumerate(pssm[row_idx]):
if aa_value == 0:
pssm[row_idx][aa_idx] = -20
continue
# 5. Normalize to relative frequencies
normalized = aa_value / row_sum
# 6. Divide by background frequencies
                bg_freq = aa_bg_vec[aa_idx]  # indexed in AA_TO_INT order
with_bg_freq = normalized / bg_freq
# 7. Calculate Log-Score
pssm[row_idx][aa_idx] = 2 * np.log2(with_bg_freq)
# 8. Remove rows corresponding to gaps in the primary sequence
gap_indices = [idx for (idx, aa) in enumerate(self.msa[0]) if aa == '-']
pssm = np.delete(pssm, gap_indices, axis=0)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.msa), len(self.msa[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.msa[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
n_seqs, seq_len = self.get_size()
msa_col_cnts = [0] * seq_len
for msa_col in range(0, seq_len):
aas = [seq[msa_col] for seq in self.msa]
msa_col_cnts[msa_col] = Counter(aas)
weights = np.zeros((seq_len, n_seqs))
# row: MSA column i | col: sequence k
for (row, col), _ in np.ndenumerate(weights):
cnt = msa_col_cnts[row]
aa = self.msa[col][row]
r = len(cnt.keys())
s = [v for (k, v) in cnt.items() if k == aa][0]
            if r != 1:
weights[(row,col)] = 1 / (r * s)
seq_sum = np.sum(weights, axis=0)
return seq_sum.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
_, L = self.get_size()
aas = set()
r_i = []
for col in range(0, L):
r_i.append(len(set(seq[col] for seq in self.msa)))
        N = 1 / L * sum(r_i)
        return np.float64(N)
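# A tiny standalone sketch (made-up alignment, not the exercise data):
if __name__ == '__main__':
    demo = MSA(['SEQ-ENCE', 'SEQ-ANCE', 'SEQAANCE'])
    print(demo.get_size())               # (3, 8)
    print(demo.get_primary_sequence())   # SEQENCE
    print(demo.get_sequence_weights())
    print(demo.get_number_of_observations())
    print(demo.get_pssm().shape)         # (7, 20)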
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignments = list()
self.boundaries = (0,0)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
        traceback = np.full((len(self.string2) + 1, len(self.string1) + 1, 3), fill_value=" ", dtype=str)
traceback[0][0] = 's'
for i in range(1, len(self.string2) + 1):
self.score_matrix[i][0] = 0
traceback[i][0][0] = 'u'
for j in range(1, len(self.string1) + 1):
self.score_matrix[0][j] = 0
traceback[0][j][2] = 'l'
maxim = 0
maxX = 0
maxY = 0
for j in range(1, len(self.string1) + 1):
for i in range(1, len(self.string2) + 1):
cell1 = self.score_matrix[i - 1][j] + self.gap_penalty
cell2 = self.score_matrix[i][j - 1] + self.gap_penalty
cell3 = self.score_matrix[i - 1][j - 1] + self.substitution_matrix[self.string1[j - 1]][self.string2[i - 1]]
res = max(cell1, cell2, cell3, 0)
self.score_matrix[i][j] = res
if res == cell1:
traceback[i][j][0] = 'u'
if res == cell3:
traceback[i][j][1] = 'd'
if res == cell2:
traceback[i][j][2] = 'l'
for j in range(1, len(self.string1) + 1):
for i in range(1, len(self.string2) + 1):
if self.score_matrix[i][j] > maxim:
maxim = self.score_matrix[i][j]
maxX = i
maxY = j
repeated = False
while not repeated:
alignment = list()
i = maxX
j = maxY
while self.score_matrix[i][j] > 0:
if traceback[i][j][0] == 'u':
alignment.append(self.string2[i - 1])
if traceback[i][j][1] != " " or traceback[i][j][2] != " ":
traceback[i][j][0] = " "
i -= 1
elif traceback[i][j][1] == 'd':
alignment.append(self.string2[i - 1])
if traceback[i][j][0] != " " or traceback[i][j][2] != " ":
traceback[i][j][1] = " "
i -= 1
j -= 1
elif traceback[i][j][2] == 'l':
alignment.append('-')
if traceback[i][j][1] != " " or traceback[i][j][0] != " ":
traceback[i][j][2] = " "
j -= 1
alignment.reverse()
if (self.string1[j:maxY], ''.join(alignment)) in self.alignments:
repeated = True
break
else:
self.alignments.append((self.string1[j:maxY], ''.join(alignment)))
self.boundaries = (j, i)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
if self.alignments[0] != ("", ""):
return True
return False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignments[0]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
string = ""
if string_number == 1:
string = self.string1
elif string_number == 2:
string = self.string2
else:
return False
b = self.boundaries[1]
if string_number == 1:
b = self.boundaries[0]
if residue_index - b >= 0:
res1 = string[residue_index]
res2 = self.alignments[0][(string_number+1) % 2][residue_index - b]
if res1 == res2:
return True
return False
'''
a = LocalAlignment("ARNDCEQGHI", "DDCEQHG", -6, {
'A': {'A': 4, 'C': 0, 'B': -2, 'E': -1, 'D': -2, 'G': 0, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 0, 'W': -3, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'C': {'A': 0, 'C': 9, 'B': -3, 'E': -4, 'D': -3, 'G': -3, 'F': -2, 'I': -1, 'H': -3, 'K': -3, 'M': -1, 'L': -1, 'N': -3, 'Q': -3, 'P': -3, 'S': -1, 'R': -3, 'T': -1, 'W': -2, 'V': -1, 'Y': -2, 'X': -2, 'Z': -3},
'B': {'A': -2, 'C': -3, 'B': 4, 'E': 1, 'D': 4, 'G': -1, 'F': -3, 'I': -3, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 3, 'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'E': {'A': -1, 'C': -4, 'B': 1, 'E': 5, 'D': 2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0, 'Q': 2, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4},
'D': {'A': -2, 'C': -3, 'B': 4, 'E': 2, 'D': 6, 'G': -1, 'F': -3, 'I': -3, 'H': -1, 'K': -1, 'M': -3, 'L': -4, 'N': 1, 'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'G': {'A': 0, 'C': -3, 'B': -1, 'E': -2, 'D': -1, 'G': 6, 'F': -3, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0, 'Q': -2, 'P': -2, 'S': 0, 'R': -2, 'T': -2, 'W': -2, 'V': -3, 'Y': -3, 'X': -1, 'Z': -2},
'F': {'A': -2, 'C': -2, 'B': -3, 'E': -3, 'D': -3, 'G': -3, 'F': 6, 'I': 0, 'H': -1, 'K': -3, 'M': 0, 'L': 0, 'N': -3, 'Q': -3, 'P': -4, 'S': -2, 'R': -3, 'T': -2, 'W': 1, 'V': -1, 'Y': 3, 'X': -1, 'Z': -3},
'I': {'A': -1, 'C': -1, 'B': -3, 'E': -3, 'D': -3, 'G': -4, 'F': 0, 'I': 4, 'H': -3, 'K': -3, 'M': 1, 'L': 2, 'N': -3, 'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': -1, 'W': -3, 'V': 3, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2, 'F': -1, 'I': -3, 'H': 8, 'K': -1, 'M': -2, 'L': -3, 'N': 1, 'Q': 0, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -2, 'V': -3, 'Y': 2, 'X': -1, 'Z': 0},
'K': {'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2, 'F': -3, 'I': -3, 'H': -1, 'K': 5, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -1, 'S': 0, 'R': 2, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 1},
'M': {'A': -1, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 0, 'I': 1, 'H': -2, 'K': -1, 'M': 5, 'L': 2, 'N': -2, 'Q': 0, 'P': -2, 'S': -1, 'R': -1, 'T': -1, 'W': -1, 'V': 1, 'Y': -1, 'X': -1, 'Z': -1},
'L': {'A': -1, 'C': -1, 'B': -4, 'E': -3, 'D': -4, 'G': -4, 'F': 0, 'I': 2, 'H': -3, 'K': -2, 'M': 2, 'L': 4, 'N': -3, 'Q': -2, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'A': -2, 'C': -3, 'B': 3, 'E': 0, 'D': 1, 'G': 0, 'F': -3, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -3, 'N': 6, 'Q': 0, 'P': -2, 'S': 1, 'R': 0, 'T': 0, 'W': -4, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'Q': {'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': 0, 'L': -2, 'N': 0, 'Q': 5, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -2, 'V': -2, 'Y': -1, 'X': -1, 'Z': 3},
'P': {'A': -1, 'C': -3, 'B': -2, 'E': -1, 'D': -1, 'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -2, 'L': -3, 'N': -2, 'Q': -1, 'P': 7, 'S': -1, 'R': -2, 'T': -1, 'W': -4, 'V': -2, 'Y': -3, 'X': -2, 'Z': -1},
'S': {'A': 1, 'C': -1, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'F': -2, 'I': -2, 'H': -1, 'K': 0, 'M': -1, 'L': -2, 'N': 1, 'Q': 0, 'P': -1, 'S': 4, 'R': -1, 'T': 1, 'W': -3, 'V': -2, 'Y': -2, 'X': 0, 'Z': 0},
'R': {'A': -1, 'C': -3, 'B': -1, 'E': 0, 'D': -2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 2, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -2, 'S': -1, 'R': 5, 'T': -1, 'W': -3, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'T': {'A': 0, 'C': -1, 'B': -1, 'E': -1, 'D': -1, 'G': -2, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 5, 'W': -2, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'W': {'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -4, 'G': -2, 'F': 1, 'I': -3, 'H': -2, 'K': -3, 'M': -1, 'L': -2, 'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 11, 'V': -3, 'Y': 2, 'X': -2, 'Z': -3},
'V': {'A': 0, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': -1, 'I': 3, 'H': -3, 'K': -2, 'M': 1, 'L': 1, 'N': -3, 'Q': -2, 'P': -2, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -2},
'Y': {'A': -2, 'C': -2, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 3, 'I': -1, 'H': 2, 'K': -2, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -2, 'T': -2, 'W': 2, 'V': -1, 'Y': 7, 'X': -1, 'Z': -2},
'X': {'A': 0, 'C': -2, 'B': -1, 'E': -1, 'D': -1, 'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1, 'L': -1, 'N': -1, 'Q': -1, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -2, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'A': -1, 'C': -3, 'B': 1, 'E': 4, 'D': 1, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0, 'Q': 3, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4}
})
print(a.has_alignment())
print(a.get_alignment())
print(a.is_residue_aligned(2, 6))
'''
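# A compact runnable example with a toy scoring dict (hypothetical values,
# independent of the BLOSUM-style demo commented out above):
if __name__ == '__main__':
    toy_matrix = {a: {b: (3 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    demo_aln = LocalAlignment('ACGT', 'CGT', -2, toy_matrix)
    print(demo_aln.has_alignment())   # True
    print(demo_aln.get_alignment())   # ('CGT', 'CGT')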
<file_sep>import itertools
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
SCORE = 'score'
PSSM_SCORE = 'pssm_score'
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.blast_db = list()
self.search_outputs = dict()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.blast_db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
if word in self.search_outputs:
return self.search_outputs[word]
else:
filter_result = filter(lambda x: word in x, self.blast_db)
search_result = list(filter_result)
self.search_outputs[word] = search_result
return self.search_outputs[word]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
def stats_extraction(unique_words_dict, avg_word_seq):
for sequence in self.blast_db:
words = set(sequence[i: i + 3] for i in range(len(sequence) - 3 + 1))
for word in words:
unique_words_dict[word] = unique_words_dict.get(word, 0) + 1
if len(sequence) > 2:
avg_word_seq += len(words)
return avg_word_seq
db_unique_words_dict = dict()
per_sequence_average_number_of_words = 0.0
per_sequence_average_number_of_words = stats_extraction(db_unique_words_dict, per_sequence_average_number_of_words)
num_seq = len(self.blast_db)
num_of_unique_words = len(db_unique_words_dict.keys())
per_sequence_average_number_of_words = per_sequence_average_number_of_words / num_seq
avg_num_sequences_per_word = sum(db_unique_words_dict.values()) / len(db_unique_words_dict.keys())
output_tuple = (num_seq, num_of_unique_words, int(per_sequence_average_number_of_words + 0.5), int(avg_num_sequences_per_word + 0.5))
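        # Note: int(x + 0.5) rounds halves *up* for the non-negative averages
        # computed here, whereas numpy.rint rounds halves to even; the two
        # conventions only differ on exact .5 values.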
return output_tuple
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
global ALPHABET
results = set()
if sequence:
for start_idx in range(0, len(sequence) - 2):
words = itertools.product(ALPHABET, repeat=3)
target = sequence[start_idx: start_idx + 3]
for query in words:
query = ''.join(query)
if self.get_score(SCORE, query, target) >= T:
results.add(query)
else:
for start_row in range(0, pssm.shape[0] - 2):
words = itertools.product(ALPHABET, repeat=3)
target = pssm[start_row: start_row + 3]
for query in words:
query = ''.join(query)
if self.get_score(PSSM_SCORE, query, target) >= T:
results.add(query)
return list(results)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
def get_one_hit_hsp(input_query, in_pssm, target_, query_start_pos, target_start_pos, drop_threshold):
query_end_pos = query_start_pos + 3
target_end_pos = target_start_pos + 3
if input_query:
current_score = self.get_score(SCORE, input_query[query_start_pos: query_end_pos],
target_[target_start_pos: target_end_pos])
else:
current_score = self.get_score(PSSM_SCORE, target_[target_start_pos: target_end_pos],
in_pssm[query_start_pos: query_end_pos])
hsp_length = 3
best_score = current_score
best_query_start_pos = query_start_pos
best_target_start_pos = target_start_pos
best_hsp_length = hsp_length
if input_query:
query_len = len(input_query)
else:
query_len = in_pssm.shape[0]
while query_end_pos < query_len and target_end_pos < len(target_):
query_end_pos += 1
target_end_pos += 1
if input_query:
current_score += self.substitution_matrix[AA_TO_INT[input_query[query_end_pos - 1]]][
AA_TO_INT[target_[target_end_pos - 1]]]
else:
current_score += in_pssm[query_end_pos - 1][AA_TO_INT[target_[target_end_pos - 1]]]
if current_score > best_score:
best_score = current_score
best_hsp_length = query_end_pos - query_start_pos
if current_score <= best_score - drop_threshold:
break
query_end_pos = query_start_pos + best_hsp_length
current_score = best_score
while query_start_pos > 0 and target_start_pos > 0:
query_start_pos -= 1
target_start_pos -= 1
if input_query:
current_score += self.substitution_matrix[AA_TO_INT[input_query[query_start_pos]]][
AA_TO_INT[target_[target_start_pos]]]
else:
current_score += in_pssm[query_start_pos][AA_TO_INT[target_[target_start_pos]]]
if current_score > best_score:
best_score = current_score
best_hsp_length = query_end_pos - query_start_pos
best_query_start_pos = query_start_pos
best_target_start_pos = target_start_pos
if current_score <= best_score - drop_threshold:
break
return best_query_start_pos, best_target_start_pos, best_hsp_length, best_score
all_hsp = dict()
for start_pos_in_query in range(len(query) - 2 if query else pssm.shape[0] - 2):
sequence = query[start_pos_in_query: start_pos_in_query + 3] if query else None
pssm_sub = pssm if pssm is None else pssm[start_pos_in_query: start_pos_in_query + 3]
words = self.get_words(sequence=sequence, pssm=pssm_sub, T=T)
for word in words:
matching_sequences = blast_db.get_sequences(word)
for target in matching_sequences:
for match in self.find_all_matches(word, target):
one_hit_hsp = get_one_hit_hsp(query, pssm, target, start_pos_in_query, match, X)
if one_hit_hsp[3] >= S:
updated_list = all_hsp.get(target, [])
if one_hit_hsp not in updated_list:
updated_list.append(one_hit_hsp)
all_hsp[target] = updated_list
return all_hsp
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
def get_two_hit_hsp(query_, pssm_, target_, hit1_query_start_pos, hit2_query_start_pos, hit2_target_start_pos, drop_threshold):
query_end_pos = hit2_query_start_pos + 3
target_end_pos = hit2_target_start_pos + 3
if query_:
current_hsp_score = self.get_score(SCORE, query_[hit2_query_start_pos: query_end_pos],
target_[hit2_target_start_pos: target_end_pos])
else:
current_hsp_score = self.get_score(PSSM_SCORE, target_[hit2_target_start_pos: target_end_pos],
pssm_[hit2_query_start_pos: query_end_pos])
if query_:
query_len = len(query_)
else:
query_len = pssm_.shape[0]
hsp_len = 3
best_score = current_hsp_score
best_query_start_pos = hit2_query_start_pos
best_target_start_pos = hit2_target_start_pos
best_hsp_length = hsp_len
while hit2_query_start_pos > 0 and hit2_target_start_pos > 0:
hit2_query_start_pos -= 1
hit2_target_start_pos -= 1
if query_:
current_hsp_score += self.substitution_matrix[AA_TO_INT[query_[hit2_query_start_pos]]][
AA_TO_INT[target_[hit2_target_start_pos]]]
else:
current_hsp_score += pssm_[hit2_query_start_pos][AA_TO_INT[target_[hit2_target_start_pos]]]
if current_hsp_score > best_score:
best_score = current_hsp_score
best_hsp_length = query_end_pos - hit2_query_start_pos
best_query_start_pos = hit2_query_start_pos
best_target_start_pos = hit2_target_start_pos
if current_hsp_score <= best_score - drop_threshold:
break
if best_query_start_pos > hit1_query_start_pos + 3:
return None
current_hsp_score = best_score
while query_end_pos < query_len and target_end_pos < len(target_):
query_end_pos += 1
target_end_pos += 1
if query_:
current_hsp_score += self.substitution_matrix[AA_TO_INT[query_[query_end_pos - 1]]][
AA_TO_INT[target_[target_end_pos - 1]]]
else:
current_hsp_score += pssm_[query_end_pos - 1][AA_TO_INT[target_[target_end_pos - 1]]]
if current_hsp_score > best_score:
best_score = current_hsp_score
best_hsp_length = query_end_pos - best_query_start_pos
if current_hsp_score <= best_score - drop_threshold:
break
return best_query_start_pos, best_target_start_pos, best_hsp_length, best_score
hits = dict()
for word_start_pos in range(len(query) - 2 if query else pssm.shape[0] - 2):
sequence = query[word_start_pos: word_start_pos + 3] if query else None
pssm_sub = pssm if pssm is None else pssm[word_start_pos: word_start_pos + 3]
words = self.get_words(sequence=sequence, pssm=pssm_sub, T=T)
for word in words:
matching_sequences = blast_db.get_sequences(word)
for target in matching_sequences:
for match in self.find_all_matches(word, target):
word_start_pos_in_target = match
target_hits = hits.get(target, [])
target_hits.append((word_start_pos, word_start_pos_in_target))
hits[target] = target_hits
all_hsp = dict()
for target in hits.keys():
target_hits = hits.get(target, [])
if len(target_hits) < 2:
continue
target_hits = sorted(target_hits)
to_pick = [True] * len(target_hits)
for hit1_idx, hit1 in enumerate(target_hits):
for hit2_idx in range(hit1_idx + 1, len(target_hits)):
if not to_pick[hit2_idx]:
continue
hit2 = target_hits[hit2_idx]
                    # hits must lie 3..A apart in the query and on the same diagonal
                    same_diagonal_pair = (3 <= (hit2[0] - hit1[0]) <= A
                                          and (hit2[1] - hit2[0]) == (hit1[1] - hit1[0]))
                    if not same_diagonal_pair:
continue
two_hit_hsp = get_two_hit_hsp(query, pssm, target, hit1[0], hit2[0], hit2[1], X)
if two_hit_hsp is None or two_hit_hsp[3] < S:
break
updated_list = all_hsp.get(target, [])
                    # avoid inserting the same HSP twice
                    if two_hit_hsp not in updated_list:
                        updated_list.append(two_hit_hsp)
all_hsp[target] = updated_list
to_pick[hit1_idx] = False
to_pick[hit2_idx] = False
for hit_index in range(0, len(target_hits)):
if not to_pick[hit_index]:
continue
hit = target_hits[hit_index]
if two_hit_hsp[0] <= hit[0] < two_hit_hsp[0] + two_hit_hsp[2] and two_hit_hsp[1] <= hit[1] < two_hit_hsp[1] + two_hit_hsp[2] and ((hit[1] - hit[0]) == (hit1[1] - hit1[0])):
to_pick[hit_index] = False
return all_hsp
def get_score(self, score_type, input_query, input_target):
score = 0
if score_type == SCORE:
for i in range(3):
score += self.substitution_matrix[AA_TO_INT[input_query[i]]][AA_TO_INT[input_target[i]]]
if score_type == PSSM_SCORE:
for i in range(3):
score += input_target[i][AA_TO_INT[input_query[i]]]
return score
@staticmethod
def find_all_matches(p, s):
i = s.find(p)
while i != -1:
yield i
i = s.find(p, i + 1)
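# Hedged standalone sketch of the helpers above (toy inputs, not the tests):
if __name__ == '__main__':
    print(list(Blast.find_all_matches('MGP', 'MGPMGPA')))        # [0, 3]
    identity_sub = [[4 if i == j else -1 for j in range(20)] for i in range(20)]
    demo_blast = Blast(identity_sub)
    print(demo_blast.get_score(SCORE, 'ACD', 'ACD'))             # 12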
<file_sep>import genetic_codon
##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
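# get_orfs below calls an index_codon helper that is not defined in this file;
# a minimal sketch under the assumption that it should return every index at
# which `codon` starts in `genome` (if the repository provides its own
# definition elsewhere, that one should be used instead):
def index_codon(genome, codon):
    return [i for i in range(len(genome) - 2) if genome[i:i + 3] == codon]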
def get_orfs(genome):
    if ('A' not in genome or 'T' not in genome or 'G' not in genome or 'C' not in genome):
        raise TypeError('The genome provided is not DNA!')
proteins = []
protein_tuple = ()
primary_genome = genome
reverse_genome = genetic_codon.reverse_complementary(genome)
#######################
# PRIMARY STRANDS
pri_start_codon_atg = []
pri_stop_codon_taa = []
pri_stop_codon_tag = []
pri_stop_codon_tga = []
#######################
#######################
# REVERSE STRANDS
rev_start_codon_atg = []
rev_stop_codon_taa = []
rev_stop_codon_tag = []
rev_stop_codon_tga = []
#######################
pri_start_codon_atg = index_codon(primary_genome, "ATG")
pri_stop_codon_taa = index_codon(primary_genome, "TAA")
pri_stop_codon_tag = index_codon(primary_genome, "TAG")
pri_stop_codon_tga = index_codon(primary_genome, "TGA")
rev_start_codon_atg = index_codon(reverse_genome, "ATG")
rev_stop_codon_taa = index_codon(reverse_genome, "TAA")
rev_stop_codon_tag = index_codon(reverse_genome, "TAG")
rev_stop_codon_tga = index_codon(reverse_genome, "TGA")
stop_found = False
start_found = False
stop_codon_index = 0
circle_of_life = False
for x in range(0, 3):  # the three reading frames of the primary strand
    i = x
while(i < len(primary_genome)):
start_codon = primary_genome[i:i+3]
if start_codon == "ATG":
# print("START YOUR ENGINES!")
start_codon_index = i
start_found = True
for y in range(start_codon_index+3, len(primary_genome), 3):
stop_codon = primary_genome[y:y+3]
if stop_codon == "TAG" or stop_codon == "TAA" or stop_codon == "TGA":
# print ("We found a pit stop!")
stop_codon_index = y
stop_found = True
if stop_found:
break
if start_found and stop_found:
start_found = False
stop_found = False
orf = primary_genome[start_codon_index:stop_codon_index+3]
protein = genetic_codon.codons_to_aa(orf)
if protein is not None and len(protein) > 33:
protein_tuple = (start_codon_index, stop_codon_index+2, protein, circle_of_life)
proteins.append(protein_tuple)
i = stop_codon_index + 3
continue
i = i+3
# wrap-around case (circular genome): if the end was reached while still
# inside an ORF, keep scanning codons across the origin
if i >= len(primary_genome) and start_found:
    i = (i + 3) % len(primary_genome)
    circle_of_life = True
    slice_of_life = primary_genome[start_codon_index:] + primary_genome[0:i]
    for y in range(0, len(slice_of_life), 3):
        stop_codon = slice_of_life[y:y + 3]
        if stop_codon == "TAG" or stop_codon == "TAA" or stop_codon == "TGA":
            stop_codon_index = (start_codon_index + y) % len(primary_genome)
            stop_found = True
        if stop_found:
            break
return proteins
# print("we found a stop!")
# for start in pri_start_codon_atg:
# print("START: " + str(start))
# # primary_genome = primary_genome[2:]
# # start reading the genome
# if start < start_counter:
# primary_genome.find()
# stop_found = False
# start_counter = start + stop_codon_index + 3
# for x in range(start_counter, len(primary_genome), 3):
# stop_codon = primary_genome[x:x+3]
# print(stop_codon)
# if stop_codon == "TAG" or stop_codon == "TAA" or stop_codon == "TGA":
# print("we found a stop!")
# stop_codon_index = primary_genome.find(stop_codon, start_counter, x+3)
# print (stop_codon_index)
# stop_found = True
# break
# if (stop_found):
# orf = primary_genome[start:stop_codon_index+3]
# lent = len(orf)
# protein = genetic_codon.codons_to_aa(orf)
# if (len(protein) > 33 and protein):
# proteins.append(protein)
# continue
# else:
# continue
# for stop_taa in pri_stop_codon_taa:
# pass
# orf = ""
# protein = ""
# orf = primary_genome[start:stop_taa+1]
# #print ("ORF: ", orf)
# if (len(orf)%3 == 0):
# protein = genetic_codon.codons_to_aa(orf)
# if (protein != None and len(protein)> 33):
# fo.write('\n\n---- stop codon TAA')
# fo.write("\n\nProtein: " + protein)
# fo.write("\nLENGTH protein: " + str(len(protein)))
# fo.write("\nORF: " + orf)
# fo.write("\nLength of ORF: " + str(len(orf)))
# fo.write("\nDIV: " + str(len(orf)%3))
# fo.write("\nStart: " + str(start) + " stop: " + str(stop_taa))
# prot_tuple = (start, stop_taa, protein, False)
# proteins.append(prot_tuple)
# break
# # print("STop TAA: " + str(stop_taa))
# for stop_tag in pri_stop_codon_tag:
# orf = ""
# protein = ""
# orf = primary_genome[start:stop_tag+1]
# # print ("ORF: ", orf)
# if (len(orf)%3 == 0):
# protein = genetic_codon.codons_to_aa(orf)
# if (protein != None and len(protein)> 33):
# fo.write('\n\n---- stop codon TAG')
# fo.write("\nProtein: " + protein)
# fo.write("\nLENGTH protein: " + str(len(protein)))
# fo.write("\nORF: " + orf)
# fo.write("\nLength of ORF: " + str(len(orf)))
# fo.write("\nDIV: " + str(len(orf)%3))
# fo.write("\nStart: " + str(start) + " stop: " + str(stop_tag))
# prot_tuple = (start, stop_tag, protein, False)
# proteins.append(prot_tuple)
# break
# for stop_tga in pri_stop_codon_tga:
# orf = ""
# protein = ""
# if start > stop_tga:
# orf = primary_genome[start:-1]+primary_genome[0:stop_tga+1]
# else:
# orf = primary_genome[start:stop_tag+1]
# # print ("ORF: ", orf)
# if (len(orf)%3 == 0):
# protein = genetic_codon.codons_to_aa(orf)
# if (protein != None and len(protein)> 33):
# fo.write('\n\n---- stop codon TGA')
# fo.write("\n\nProtein: " + protein)
# fo.write("\nLENGTH protein: " + str(len(protein)))
# fo.write("\nORF: " + orf)
# fo.write("\nLength of ORF: " + str(len(orf)))
# fo.write("\nDIV: " + str(len(orf)%3))
# fo.write("\nStart: " + str(start) + " stop: " + str(stop_tga))
# prot_tuple = (start, stop_tga, protein, False)
# proteins.append(prot_tuple)
# break
# fo.close()
# return proteins
# go through the TAA
# calculate difference
# store those {(last-first)+1} > 99
# go through tag
# calculate difference
#go through TGA
# calculate differece
# def refactor_reverse_index(genome_length, codon_indices):
# temp = []
# x = -3
# for i in range(0, genome_length):
# print ("LENGTH: " + str(genome_length))
# print("X: " + str(x))
# print ("ASD " + str(codon_indices[x]))
# # x = x - 1
# # for index in codon_indices:
# # index = genome_length - index
# # temp.append(index)
# # print (temp)
# return temp
def index_codon(genome, codon_triplet):
    # Return every occurrence of codon_triplet: start codons are reported by
    # their first base, stop codons by their last base (hence the +2 offset).
    codon_inc = 0 if codon_triplet == "ATG" else 2
    temp = []
    i = genome.find(codon_triplet)
    while i != -1:
        temp.append(i + codon_inc)
        i = genome.find(codon_triplet, i + 3)
    return temp
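# Usage sketch for index_codon (runs only when this file is executed
# directly; the toy genome below is made up for illustration):
if __name__ == '__main__':
    demo = 'ATGAAATAA'
    print(index_codon(demo, 'ATG'))  # [0]: start codons indexed by first base
    print(index_codon(demo, 'TAA'))  # [8]: stop codons indexed by last base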
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you see fit.
:param sequences: List containing the MSA sequences.
"""
tmp_flag = True
self.len_pssm = len(sequences)
self.r = []
if self.len_pssm > 0:
self.len_protein = len(sequences[0])
for i in sequences:
if len(i)!=self.len_protein or not set(i).issubset(ALPHABET):
tmp_flag = False
break
else:
tmp_flag = False
if tmp_flag:
self.sequences = sequences
else:
raise TypeError
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
self.get_sequence_weights()
self.get_number_of_observations()
f = np.zeros((self.len_protein, 21))
if bg_matrix is not None:
background = np.sum(bg_matrix,axis=1)
else:
background = np.ones(20)/20
bg_matrix = np.ones((20,20))/400
background = np.append(background, 0)
for i in range(self.len_protein):
for j, item in enumerate(self.sequences):
if use_sequence_weights:
f[i,AA_TO_INT[item[i]]] += self.weights[j]
else:
f[i, AA_TO_INT[item[i]]] += 1
if redistribute_gaps:
for i in f:
i += i[-1]*background
f = f[:, :-1]
background = background[:-1]
pseudo = np.zeros((self.len_protein, 20))
if add_pseudocounts:
for ind, it in enumerate(f):
for a in range(20):
for jnd, jt in enumerate(it):
pseudo[ind, a] += f[ind, jnd]/background[jnd]*bg_matrix[jnd][a]
f = ((self.num_obs-1)*f+beta*pseudo)/(self.num_obs-1+beta)
for i in f:
i/=np.sum(i)
i/=background
# zero frequencies would give -inf; 2 * log2(2^-10) = -20, as the spec requires
f[f == 0] = 2 ** (-10)
f = 2 * np.log2(f)
rows = []
for ind,it in enumerate(self.sequences[0]):
if it != '-':
rows.append(ind)
f = f[rows]
pssm = np.rint(f).astype(np.int64)
return pssm
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.len_pssm, self.len_protein)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
self.primary = self.sequences[0].replace("-","")
return self.primary
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
self.r = []
weights = np.zeros(self.len_pssm)
for col_ind in range(self.len_protein):
columns = ''
for it in self.sequences:
columns += it[col_ind]
self.r.append(len(set(columns)))
if self.r[-1] > 1:
for ind, i in enumerate(columns):
weights[ind] += 1/(columns.count(i)*self.r[-1])
self.weights = weights.astype(np.float64)
return self.weights
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
self.num_obs = (np.sum(self.r)/self.len_protein).astype(np.float64)
return self.num_obs
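# A minimal usage sketch (toy sequences, made up for illustration; run only
# when this file is executed directly):
if __name__ == '__main__':
    msa = MSA(['SEQVENCE', 'SEQWENCE', 'SEQ-ENCE'])
    print(msa.get_size())               # (3, 8)
    print(msa.get_primary_sequence())   # 'SEQVENCE' (gap-free primary)
    print(msa.get_sequence_weights())   # one weight per sequence
    print(msa.get_pssm().shape)         # expected (8, 20): one row per primary position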
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.result = list()
self.align()
def align(self):
n = len(self.string1)#columns
m = len(self.string2)#rows
for i in range(0, m + 1):
self.score_matrix[i][0] = self.gap_penalty * i
for j in range(0, n + 1):
self.score_matrix[0][j] = self.gap_penalty * j
for i in range(1, m + 1):
for j in range(1, n + 1):
match = self.score_matrix[i - 1][j - 1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]
delete = self.score_matrix[i - 1][j] + self.gap_penalty
insert = self.score_matrix[i][j - 1] + self.gap_penalty
self.score_matrix[i][j] = max(match, delete, insert)
self.findAlignments(len(self.string2),len(self.string1),"","")
def findAlignments(self, i, j,a1,a2):
if (i == 0 and j == 0):
if self.score_matrix[i, j] == 0:
self.result.append((a1,a2))
return
elif i == 0 and j > 0:
self.findAlignments(i, j - 1, self.string1[j-1] + a1, "-" + a2)
elif j == 0 and i > 0:
self.findAlignments(i-1, j, "-" + a1, self.string2[i-1] + a2)
else:
if self.score_matrix[i,j] == self.score_matrix[i,j-1] + self.gap_penalty: #up
self.findAlignments(i, j-1, self.string1[j-1] + a1, "-" + a2)
if self.score_matrix[i,j] == self.score_matrix[i-1,j] + self.gap_penalty: #left
self.findAlignments(i-1,j,'-' + a1 ,self.string2[i-1] + a2)
if self.score_matrix[i,j] == self.score_matrix[i-1,j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
self.findAlignments(i-1, j-1, self.string1[j-1] + a1, self.string2[i-1] + a2)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1][-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.result)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.result
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as an np.array
"""
return self.score_matrix
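# Usage sketch with a tiny made-up substitution dict (illustration only; the
# exercises normally pass a BLOSUM-style dict; runs only when executed directly):
if __name__ == '__main__':
    sub = {a: {b: (1 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    ga = GlobalAlignment('ACGT', 'AGT', -2, sub)
    print(ga.get_best_score())   # 1 for this toy pair
    print(ga.get_alignments())   # [('ACGT', 'A-GT')]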
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from codons_to_amino import codons_to_aa
def complementary(dna):
seq ={'A':'T', 'T':'A', 'C':'G', 'G':'C'}
c_dna=''
for i in dna:
c_dna = c_dna + seq[i]
return c_dna
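# Quick sanity check (runs only when executed directly): the reverse
# complement used below is the complement read backwards.
if __name__ == '__main__':
    assert complementary('ATCG') == 'TAGC'
    print(complementary('ATCG')[::-1])  # 'CGAT'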
def get_orfs(genome):
for nuc in genome:
if nuc not in ['A', 'T', 'C', 'G']:
raise TypeError
frames = []
res = []
rev_comp = complementary(genome)[::-1]
length = len(genome)
frames.append(codons_to_aa(genome[0:]))
frames.append(codons_to_aa(genome[1:]))
frames.append(codons_to_aa(genome[2:]))
frames.append(codons_to_aa(rev_comp[0:]))
frames.append(codons_to_aa(rev_comp[1:]))
frames.append(codons_to_aa(rev_comp[2:]))
for i in range(6):
beg = 0
c=[]
while True:
start_index = frames[i].find('M', beg)
if start_index == -1:
break
beg = start_index+1
stop_index = frames[i].find('*',beg)
c.append(stop_index)
if stop_index ==-1:
stop_index = frames[i].find('*', 0, beg)
if stop_index - start_index > 33 and stop_index !=-1 and c.count(stop_index)==1:
if i<=2:
tup = ((start_index*3)+i, ((stop_index+1)*3)+(i-1), frames[i][start_index:stop_index], False)
res.append(tup)
else:
tup = (length-1-(start_index * 3) - i%3, (length-1-((stop_index+1)*3+(i%3-1))), frames[i][start_index:stop_index], True)
res.append(tup)
return res
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
self.sequence_list = []
self.word_to_seq_dic = {}
self.seq_to_word_dic = {}
def add_sequence(self, sequence):
word_list = []
for index in range(len(sequence)-2):
word = sequence[index:index+3]
word_list.append(word)
word_list = set(word_list)
for word in word_list:
try:
self.word_to_seq_dic[word].append(sequence)
except KeyError:
self.word_to_seq_dic[word] = [sequence]
self.seq_to_word_dic[sequence] = word_list
self.sequence_list.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return self.word_to_seq_dic.get(word, [])  # [] rather than None for unknown words
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
x1 = len(self.sequence_list)
x2 = len(self.word_to_seq_dic)
x3 = 0
x4 = 0
for seq in self.sequence_list:
x3 += len(self.seq_to_word_dic[seq])
x3 /= x1
for seq in self.word_to_seq_dic.values():
x4 += len(seq)
x4 /= x2
return x1, x2, int(x3+0.5), int(x4+0.5)
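# Usage sketch for the database above (toy sequences, made up for
# illustration; both contain the word 'DEF'; runs only when executed directly):
if __name__ == '__main__':
    db = BlastDb()
    db.add_sequence('MSKDEF')
    db.add_sequence('KDEFGH')
    print(db.get_sequences('DEF'))  # both sequences are returned
    print(db.get_db_stats())        # (2, 6, 4, 1)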
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
self.all_words = []
for firstLetter in ALPHABET:
for secondLetter in ALPHABET:
for thirdLetter in ALPHABET:
self.all_words.append(firstLetter+secondLetter+thirdLetter)
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
unique_words = []
print("hi")
if sequence is not None:
for index in range(len(sequence[:-2])):
word = sequence[index:index+3]
letter_1 = AA_TO_INT[word[0]]
letter_2 = AA_TO_INT[word[1]]
letter_3 = AA_TO_INT[word[2]]
for compare_word in self.all_words:
c_letter_1 = AA_TO_INT[compare_word[0]]
c_letter_2 = AA_TO_INT[compare_word[1]]
c_letter_3 = AA_TO_INT[compare_word[2]]
x1 = self.substitution_matrix[letter_1, c_letter_1]
x2 = self.substitution_matrix[letter_2, c_letter_2]
x3 = self.substitution_matrix[letter_3, c_letter_3]
if x1 + x2 +x3 >= T:
unique_words.append(compare_word)
return list(set(unique_words))
else:
for index in range(len(pssm[:-2])):
for compare_word in self.all_words:
c_letter_1 = AA_TO_INT[compare_word[0]]
c_letter_2 = AA_TO_INT[compare_word[1]]
c_letter_3 = AA_TO_INT[compare_word[2]]
x1 = pssm[index][c_letter_1]
x2 = pssm[index+1][c_letter_2]
x3 = pssm[index+2][c_letter_3]
if x1 + x2 + x3 >= T:
unique_words.append(compare_word)
return list(set(unique_words))
def find_word(self, sequence, word):
for index in range(len(sequence) - len(word) + 1):  # include the final position
if sequence[index: index + len(word)] == word:
return index
return -1
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix = self.get_score_matrix()
self.alignments = self.get_alignments()
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
CountScore = []
for alignment in self.alignments:
alignment1 = alignment[0][::-1]
alignment2 = alignment[1][::-1]
i, j = 0, 0
score = 0
found = 0
for i in range(0, len(alignment1)):
if alignment1[i] != '-' and alignment2[i] != '-':
    score = score + self.substitution_matrix[alignment1[i]][alignment2[i]]
else:
    score = score + self.gap_penalty
CountScore.append(score)
return max(CountScore)
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
row = self.score_matrix.shape[0]
col = self.score_matrix.shape[1]
graph = self.make_graph()
tracks = self.find_all_paths(graph, (row - 1,col - 1), (0, 0))
seq1 = self.string2
seq2 = self.string1
alignments = []
for track in tracks:
# from these paths we can reconstruct all optimal alignments
Baseq1 = ''
Baseq2 = ''
for step in range(0,len(track)):
if track[step][0] > 0 and track[step][1] > 0:
if track[step][0] == track[step+1][0]:
Baseq1 = '-' + Baseq1
Baseq2 = seq2[track[step][1]-1] + Baseq2
elif track[step][1] == track[step+1][1]:
Baseq2 = '-' + Baseq2
Baseq1 = seq1[track[step][0]-1] + Baseq1
else:
Baseq1 = seq1[track[step][0]-1] + Baseq1
Baseq2 = seq2[track[step][1]-1] + Baseq2
alignments.append((Baseq2,Baseq1))
return alignments
def match_score(self, che1, che2):
if che1 == '-' or che2 == '-':
return self.gap_penalty
else:
return self.substitution_matrix[che1][che2]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as an np.array
"""
matrices = self.score_matrix
row = matrices.shape[0]
col = matrices.shape[1]
gap_penalty = self.gap_penalty
seq1 = self.string2
seq2 = self.string1
for j in range(0, col):
matrices[0][j] = gap_penalty * j
for i in range(0, row):
matrices[i][0] = gap_penalty * i
for i in range(1, row):
for j in range(1, col):
match = matrices[i - 1][j - 1] + self.match_score(seq1[i - 1], seq2[j - 1])
delete = matrices[i - 1][j] + gap_penalty
insert = matrices[i][j - 1] + gap_penalty
matrices[i][j] = max(match, delete, insert)
return np.array(matrices)
def make_graph(self):
row = self.score_matrix.shape[0]
col = self.score_matrix.shape[1]
seq1 = self.string2
seq2 = self.string1
graph = {}
for i in range(1, row)[::-1]:
graph[(i, 0)] = [(i - 1, 0)]
for j in range(1, col)[::-1]:
graph[(0, j)] = [(0, j - 1)]
graph[(i, j)] = []
score = self.score_matrix[i][j]
score_diag = self.score_matrix[i - 1][j - 1]
score_up = self.score_matrix[i][j - 1]
score_left = self.score_matrix[i - 1][j]
if score == score_diag + self.substitution_matrix[seq1[i - 1]][seq2[j - 1]]:
graph[(i, j)] += [(i - 1, j - 1)]
if score == score_left + self.gap_penalty:
graph[(i, j)] += [(i - 1, j)]
if score == score_up + self.gap_penalty:
graph[(i, j)] += [(i, j - 1)]
return graph
def find_all_paths(self, graph, starting, last, path=None):
    # avoid a mutable default argument; the path is rebuilt on every call
    path = (path or []) + [starting]
if starting == last:
return [path]
if starting not in graph:
return []
paths = []
for node in graph[starting]:
if node not in path:
newpaths = self.find_all_paths(graph, node, last, path)
for newpath in newpaths:
paths.append(newpath)
return paths
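# Toy sketch of the path enumeration above (illustration only; __new__ skips
# __init__ here because find_all_paths needs no instance state):
if __name__ == '__main__':
    toy_graph = {(1, 1): [(0, 1), (1, 0)], (0, 1): [(0, 0)], (1, 0): [(0, 0)]}
    ga = GlobalAlignment.__new__(GlobalAlignment)
    print(ga.find_all_paths(toy_graph, (1, 1), (0, 0)))
    # [[(1, 1), (0, 1), (0, 0)], [(1, 1), (1, 0), (0, 0)]]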
<file_sep>import sys
import numpy as np
from collections import Counter
from collections import defaultdict
import itertools
import re
from pathlib import Path
import json
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db = np.array([], dtype=np.str_)
self.N = -1
self.word_counter = Counter()
self.word_counter_keys = np.array([])
self.word_matrix = np.array([[]])
self.seq_counters = []
self.cache_to_add = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.cache_to_add.append(sequence)
words = [sequence[i:i + 3] for i in range(0, len(sequence) - 2)]
seq_counter = Counter(words)
self.word_counter.update(seq_counter)
self.seq_counters.append(seq_counter)
def add_sequence_flush(self):
    """
    Flush all cached sequences into the database structures.
    """
# Run only if there is something to add
if len(self.cache_to_add) > 0:
seq_list = list(self.db) + self.cache_to_add
self.N = len(seq_list)
self.db = np.array(seq_list)
# Dict keys are unordered! Create one time ordered representation. (Alternative: collections.OrderedDict )
self.word_counter_keys = np.array(list(self.word_counter.keys()))
self.word_matrix = np.zeros((self.N, self.word_counter_keys.size))
print("word_matrix shape: {}".format(self.word_matrix.shape))
for idx_seq, seq_counter in enumerate(self.seq_counters):
word_iteration_list = [(idx, word) for idx, word in np.ndenumerate(self.word_counter_keys) if word in seq_counter.keys()]
for idx_word, word in word_iteration_list:
self.word_matrix[idx_seq, idx_word] = seq_counter[word]
self.cache_to_add = []
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
# First, update database with all new elements
self.add_sequence_flush()
# With word_matrix counts
# where(a) equal to nonzero(a)
word_index_tuple = np.nonzero(self.word_counter_keys == word)  # a collections.OrderedDict would allow a direct key lookup
if word_index_tuple[0].size == 0:
    # an arbitrary word may not even be in the database
    return []
else:
    word_index = word_index_tuple[0][0]  # take the only result from the search
    match_list = self.db[np.nonzero(self.word_matrix[:, word_index])]
    return match_list
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
# First, update database with all new elements
self.add_sequence_flush()
number_of_sequences_in_database = self.N
number_of_different_words_in_database = len(self.word_counter.keys())
average_number_of_words_per_sequence = int(round(np.mean(np.count_nonzero(self.word_matrix, axis=1))))
average_number_of_sequences_per_word = int(round(np.mean(np.count_nonzero(self.word_matrix, axis=0))))
return (number_of_sequences_in_database, number_of_different_words_in_database, average_number_of_words_per_sequence,
average_number_of_sequences_per_word)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11, with_indices=False):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
word_aa_indices = itertools.product(range(len(ALPHABET)), repeat=3)
result_list = []
result_dict = defaultdict(lambda: [])
if pssm is not None:
L = pssm.shape[0]
elif sequence is not None:
L = len(sequence)
sequence_idx = np.array([AA_TO_INT[s] for s in sequence])
else:
print("ERROR: no sequence of PSSM given in get_words!")
raise ValueError("ERROR: no sequence of PSSM given in get_words!")
for w_idx in word_aa_indices:
previous_res = (-1, -1)
for idx in range(L - 2):
# iterate through 3-tuples of sequence
if pssm is not None:
score = np.sum(pssm[(idx,idx+1,idx+2), w_idx])
elif sequence is not None:
score = np.sum(self.substitution_matrix[sequence_idx[idx:idx+3], w_idx])
if score >= T:
word = INT_TO_AA[w_idx[0]] + INT_TO_AA[w_idx[1]] + INT_TO_AA[w_idx[2]]
if with_indices:
    res = (score, idx)
    result_dict[word].append(res)
else:
result_list.append(word)
break
else:
    # score below threshold: do not add the word at this position
    pass
if with_indices:
#print("word result_dict of length {}".format(len(result_dict)))
return result_dict
else:
return result_list
def compute_hsp(self, word=None, L=None, w_score=None, query_start_idx=None, target_start_idx=None, result_dict=None, target_sequence=None, target_sequence_idx=None,
pssm=None, query=None, query_sequence_idx=None, T=13, X=5, S=30, previous_res=None):
# forward search
if L - query_start_idx < target_sequence_idx.size - target_start_idx:
target_end_idx_fw = target_start_idx + (L - query_start_idx)
else:
target_end_idx_fw = target_sequence_idx.size
target_sequence_idx_offset_fw = target_sequence_idx[target_start_idx:min(target_sequence_idx.size, target_start_idx + (L - query_start_idx))]
if pssm is not None:
pairwise_scores_fw = pssm[np.arange(query_start_idx,min(L, query_start_idx + (target_sequence_idx.size - target_start_idx))), target_sequence_idx_offset_fw]
elif query is not None:
query_sequence_idx_offset_fw = query_sequence_idx[query_start_idx:min(L, query_start_idx + (target_sequence_idx.size - target_start_idx))]
pairwise_scores_fw = self.substitution_matrix[query_sequence_idx_offset_fw, target_sequence_idx_offset_fw]
last_highest_score_fw_np = w_score
last_highest_idx_tuple_fw_np = (query_start_idx + 2, target_start_idx + 2) # already including the full word
cumsum_fw = np.cumsum(pairwise_scores_fw)
last_highest_score_fw_np_list = []
for cs_idx_tuple, cs in np.ndenumerate(cumsum_fw[3:]): # skip the word itself, since it is always included
cs_idx = cs_idx_tuple[0]
if cs > last_highest_score_fw_np:
last_highest_score_fw_np = cs
last_highest_score_fw_np_list.append(last_highest_score_fw_np)
last_highest_idx_tuple_fw_np = (query_start_idx+cs_idx + 3, target_start_idx+cs_idx + 3)
elif cs <= last_highest_score_fw_np-X:
break
# print("L {}, len(target_sequence) {}, target_start_idx {}, target_end_idx_fw {}".format(L, len(target_sequence), target_start_idx, target_end_idx_fw))
last_highest_score = last_highest_score_fw_np
last_highest_idx_tuple_fw = last_highest_idx_tuple_fw_np
# backward search
if query_start_idx < target_start_idx:
target_end_idx_bw = target_start_idx - query_start_idx
query_end_idx_bw = 0
else:
target_end_idx_bw = 0
query_end_idx_bw = query_start_idx - target_start_idx
# Reversed sequences
if target_start_idx > 0:
target_sequence_idx_offset_bw = target_sequence_idx[target_start_idx-1:(None if target_end_idx_bw <= 0 else target_end_idx_bw - 1):-1]
else:
target_sequence_idx_offset_bw = np.array([], dtype=np.int64)
if pssm is not None:
pairwise_scores_bw = pssm[np.arange(query_start_idx-1, query_end_idx_bw-1, -1), target_sequence_idx_offset_bw]
elif query is not None:
if query_start_idx > 0:
query_sequence_idx_offset_bw = query_sequence_idx[query_start_idx-1:(None if query_end_idx_bw <= 0 else query_end_idx_bw - 1):-1]
else:
query_sequence_idx_offset_bw = np.array([], dtype=np.int64)
pairwise_scores_bw = self.substitution_matrix[query_sequence_idx_offset_bw, target_sequence_idx_offset_bw]
last_highest_score_bw_np = last_highest_score
last_highest_idx_tuple_bw_np = (query_start_idx, target_start_idx) # first index of word, before looking backwards
cumsum_bw = np.cumsum(pairwise_scores_bw) + last_highest_score
last_highest_score_bw_np_list = []
for cs_idx_tuple, cs in np.ndenumerate(cumsum_bw):
cs_idx = cs_idx_tuple[0]
if cs > last_highest_score_bw_np:
last_highest_score_bw_np = cs
last_highest_score_bw_np_list.append(last_highest_score_bw_np)
last_highest_idx_tuple_bw_np = (query_start_idx - cs_idx - 1, target_start_idx - cs_idx - 1)
elif cs <= last_highest_score_bw_np - X:
break
# print("L {}, query_start_idx {}, len(target_sequence) {}, target_start_idx {}, target_end_idx_bw {}".format(L, query_start_idx, len(target_sequence), target_start_idx, target_end_idx_bw))
last_highest_score = last_highest_score_bw_np
last_highest_idx_tuple_bw = last_highest_idx_tuple_bw_np
query_hsp_start, target_hsp_start = last_highest_idx_tuple_bw
query_hsp_end, target_hsp_end = last_highest_idx_tuple_fw
# HSP has score higher than threshold
if last_highest_score >= S:
hsp_length = target_hsp_end - target_hsp_start + 1
result_tuple = (query_hsp_start, target_hsp_start, hsp_length, last_highest_score)
result_dict[target_sequence].add(result_tuple)
return result_tuple
else:
#print("Nothing found in target sequence {}".format(target_sequence[0:8]))
#print("Nothing found with score {}, query_start_idx {}, target_start_idx {}".format(last_highest_score, query_start_idx, target_start_idx))
return (-1, -1, -1, -1)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
result_dict = defaultdict(lambda: set())
if pssm is not None:
L = len(pssm)
query_sequence_idx = None
elif query is not None:
L = len(query)
query_sequence_idx = np.array([AA_TO_INT[s] for s in query])
else:
print("ERROR: no sequence of PSSM given in get_words!")
raise ValueError("ERROR: no sequence of PSSM given in get_words!")
words = self.get_words(sequence=query, pssm=pssm, T=T, with_indices=True)
print("For {} words...".format(len(words)))
for w, w_res_list in words.items():
matches = blast_db.get_sequences(w)
#print("For {} target matches...".format(len(matches)))
for target_sequence in matches: # for target_sequence, target_start_idx in zip(matches, indices):
target_sequence_idx = np.array([AA_TO_INT[s] for s in target_sequence])
previous_res = (-1, -1, -1, -1)
#print("For {} target word occs...".format(len(list(re.finditer(w, target_sequence)))))
for word_match in re.finditer('(?=' + w + ')', target_sequence):  # positive lookahead also finds overlapping occurrences
target_start_idx = word_match.start()
#print("For {} query word occs...".format(len(w_res_list)))
for w_score, query_start_idx in w_res_list:
previous_res = self.compute_hsp(L=L, word=w, w_score=w_score, query_start_idx=query_start_idx, target_start_idx=target_start_idx, result_dict=result_dict,
target_sequence=target_sequence, target_sequence_idx=target_sequence_idx,
pssm=pssm, query=query, query_sequence_idx=query_sequence_idx, T=T, X=X, S=S,
previous_res=previous_res)
return result_dict
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d
missed = [
'MGTTEATLRMENVDVRDEWQDEDLPRPLPEDTGEDHLGGTVEDSSSPPSTLNLSGAHRKRKTLVAPEINISLDQSEGSLLSDDFLDTPDDLDINVDDIETPDETDSLEFLGNGNELEWEDDTPVATAKNMPGDSADLFGDGSGEDGSAANGRLWRTVIIGEQEHRIDLHMIRPYMKVVTHGGYYGEGLNAIIVFAACFLPDSSSPDYHYIMENLFLYVISSLELLVAEDYMIVYLNGATPRRRMPGIGWLKKCYHMIDRRLRKNLKSLIIVHPSWFIRTVLAISRPFISVKFISKIQYVHSLEELEQLIPMEHVQLPACVLQYEEQRLRAKRESARPPQPEFLLPRSEEKPETVEEEDRAAEVTEDQETSMS',
'MSMNKGPTLLDGDLPEQENVLQRVLQLPVVSGTCECFQKTYNSTKEAHPLVASVCNAYEKGVQGASNLAAWSMEPVVRRLSTQFTAANELACRGLDHLEEKIPALQYPPEKIASELKGTISTRLRSARNSISVPIASTSDKVLGATLAGCELALGMAKETAEYAANTRVGRLASGGADLALGSIEKVVEYLLPPDKVESAPSSGRQKTQKAPKAKPSLLRRVSTLANTLSRHTMQTTARALKRGHSLAMWIPGVAPLSSLAQWGASAAMQVVSRRQSEVRVPWLHNLAASKDENHEDQTDTEGEETDEEEEEEESEAEENVLREVTALPTPLGFLGGVVHTVQKTLQNTISAVTWAPAAVLGTVGRILHLTPAQAVSSTKGRAMSLSDALKGVTDNVVDTVVHYVPLPRLSLMEPESEFQDIDNPPAEVERKGSGSRPASPESTARPGQPRAACAVRGLSAPSCPDLDDKTETSARPGLLAMPREKPARRVSDSFFRPSVMEPILGRTQYSQLRKKS',
'MSS<KEY>',
'<KEY>',
'M<KEY>TPPAPPPPPARDCGASGFHVDVVVTGVVDACIFFGKDGTKNVKEETVCLTVSPEEPPPPGQLFFLQSRGPEGPPEPPPADTASKVPGPEDSEGTTDTSLCRLYRHVSHDFLEIRFKIQRLLEPRQYMLLLPEHVLVKIFSFLPTRALAALKCTCHHFKGIIEAFGVRATDSRWSRDPLYRDDPCKQCRKRYEKGDVSLCRWHPKPYHHDLPYGRSYWMCCRRADRETPGCRLGLHDNNWVLPCNGVGGGRAGREEGR'
]
def main():
test_json = './tests/blast_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
blast_db = BlastDb()
blast = Blast(np.array(json_data['sub_matrix'], dtype=np.int64))
print("Number of db_sequences: " + str(len(json_data['db_sequences'])))
# input = json_data['db_sequences'][0:3]
#input = json_data['blast_hsp_one_hit_1'].keys()
input = missed
print("Input of len: {}".format(len(input)))
for s in input:
blast_db.add_sequence(s)
query = json_data['query_seq']
#query = None
#pssm = np.array(json_data['query_pssm'], dtype=np.int64)
pssm = None
results = blast.search_one_hit(blast_db, query=query, pssm=pssm, T=13, X=5, S=30)
#missed = set(input) - set(results.keys())
#for m in missed:
# print("'" + m + "'" + ",")
# print(results)
print("Results of len: {}".format(len(results)))
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.5
##############
import re
import main
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome_str):
    genome = Genome(genome_str)
    genome.find_sequences()
    return genome.find_orfs()
def complementary(genome):
out = ''
for x in genome:
if x == 'A':
out = out + 'T'
if x == 'T':
out = out + 'A'
if x == 'G':
out = out + 'C'
if x == 'C':
out = out + 'G'
return out
class Genome:
    _offset = {1: 0, 2: 1, 3: 2, 4: 0, 5: 1, 6: 2}

    def __init__(self, genome):
        if not bool(re.match('^[ATCG]*$', genome)):
            raise TypeError('Not a valid genome')
        self.genome = genome
        self.length = len(genome)
        # per-instance state: a class-level "sequences = []" would be shared
        # between all Genome instances and grow with every find_sequences call
        self.sequences = []
        self._ctr = None
        self._results = None
        self._lengths = None
def find_sequences(self):
def h(genome, sequences):
sequences.append([genome[i:i + 3] for i in range(0, len(genome), 3)])
sequence = [genome[i:i + 3] for i in range(1, len(genome), 3)]
sequence[len(sequence) - 1] += genome[0]
sequences.append(sequence)
sequence = [genome[i:i + 3] for i in range(2, len(genome), 3)]
sequence[len(sequence) - 1] += genome[0]
sequence[len(sequence) - 1] += genome[1]
sequences.append(sequence)
# make genome twice as long in case of circles
double_genome = self.genome * 2
# split into sequences
# primary strand
h(double_genome, self.sequences)
# complementary strand
h(complementary(double_genome)[::-1], self.sequences)
def find_orfs(self):
self._results = []
self._lengths = []
# counter to check if reverse strand
self._ctr = 1
for seq in self.sequences:
self._repeat(seq)
self._ctr += 1
return self._results
def _repeat(self, sequence):
start_index = 0
start_index_new = None
end_index_new = None
loop = True
while loop:
# has start codon?
if 'ATG' not in sequence[start_index:len(sequence)]:
return
start_index += sequence[start_index:].index('ATG')
end_index = start_index
ending = False
# check for stop codon in remaining sequence
for i in range(start_index, len(sequence)):
if not ending and sequence[i] in ['TAA', 'TAG', 'TGA']:
end_index = i
ending = True
# get correct codon indices
end_index_new = end_index * 3 + self._offset[self._ctr] + 2
start_index_new = start_index * 3 + self._offset[self._ctr]
# check if genome has been searched completely
if end_index_new > self.length:
loop = False
# doesn't have start and stop codon
if not ending:
return
# make sure length > 33
length = end_index - start_index
if length <= 33:
start_index = end_index + 1
continue
if start_index_new >= self.length:
return
# get aa string
aaSeq = main.codons_to_aa(''.join(sequence[start_index:end_index]))
if end_index_new > self.length:
end_index_new = end_index_new - self.length
# check for reverse strand
if self._ctr > 3:
result = (self.length - start_index_new - 1, self.length - end_index_new - 1, aaSeq, True)
else:
result = (start_index_new, end_index_new, aaSeq, False)
length_new = 0
if end_index_new < start_index_new:
length_new = self.length - (start_index_new - end_index_new)
else:
length_new = end_index_new - start_index_new
if self.check_for_duplicates(result, length_new):
self._lengths.append(length_new)
self._results.append(result)
start_index = end_index + 1
def check_for_duplicates(self, result, length):
    # keep only the longest ORF for each stop position
    for idx in range(len(self._results)):
        if result[1] == self._results[idx][1]:
            if length > self._lengths[idx]:
                self._lengths.pop(idx)
                self._results.pop(idx)
                break
            else:
                return False
    return True
<file_sep>import numpy as np
import math
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
        self.valid_MSA = []
        if len(sequences) >= 1:
            # all sequences must have the same length
            for i in range(len(sequences) - 1):
                if len(sequences[i]) != len(sequences[i + 1]):
                    raise TypeError("Sequence sizes don't match")
            # every character must be a valid amino acid or the gap symbol
            for seq in sequences:
                for ch in seq:
                    if ch not in ALPHABET:
                        raise TypeError("Invalid AA")
            self.valid_MSA = sequences
        else:
            raise TypeError("Invalid amount of sequences")
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
alpha = self.get_number_of_observations() - 1
#Get observed AAs with respect to the weights
if use_sequence_weights is True:
observed_AAs_Gaps = self.observed_AAs_Gaps_weights()
else:
observed_AAs_Gaps = self.observed_AAs_Gaps_noweights()
        # calculate background frequency for every AA (None means uniform)
        self.background_freq = None if bg_matrix is None else np.sum(bg_matrix, axis=0)
        # redistribute gaps if flag is true
        if redistribute_gaps:
if self.background_freq is not None:
observed_AAs_Gaps = self.redistribute_gaps(observed_AAs_Gaps)
else:
observed_AAs_Gaps = self.redistribute_gaps_uniform(observed_AAs_Gaps)
#remove gap column to calculate pseudocounts
observed_AAs_GapsColRemoved = np.delete(observed_AAs_Gaps, len(ALPHABET) - 1, 1)
# add pseudocounts if flag is true
if add_pseudocounts is True:
if self.background_freq is None:
pseudocounts_matrix = self.get_pseudocounts_matrix_uniform(observed_AAs_GapsColRemoved)
alpha_freq = observed_AAs_GapsColRemoved * alpha
beta_pseudo = pseudocounts_matrix * beta
observed_AAs_GapsColRemoved = (alpha_freq + beta_pseudo)/(alpha + beta)
else:
pseudocounts_matrix = self.get_pseudocounts_matrix(observed_AAs_GapsColRemoved, bg_matrix)
alpha_freq = observed_AAs_GapsColRemoved * alpha
beta_pseudo = pseudocounts_matrix * beta
observed_AAs_GapsColRemoved = (alpha_freq + beta_pseudo) / (alpha + beta)
# calculate relative frequency for every row
relative_freq = observed_AAs_GapsColRemoved.sum(axis=1)
# Normalize with respect to frequencies
observed_AAs_GapsColRemoved = self.normalize_matrix(observed_AAs_GapsColRemoved, relative_freq)
        #Divide by background frequencies
if self.background_freq is not None:
observed_AAs_GapsColRemoved = self.divide_bg_freq(observed_AAs_GapsColRemoved)
else:
observed_AAs_GapsColRemoved = self.divide_bg_freq_uniform(observed_AAs_GapsColRemoved)
#Calculate log score
observed_AAs_GapsColRemoved = self.calculate_log_score(observed_AAs_GapsColRemoved)
observed_AAs_GapsColRemoved = self.remove_primary_gap_seq(observed_AAs_GapsColRemoved)
return np.rint(observed_AAs_GapsColRemoved).astype(np.int64)
def remove_primary_gap_seq(self, observed_AAs):
observed_AAs_removed_gaps = np.zeros((len(self.get_primary_sequence()), len(ALPHABET) - 1))
i = 0
j = 0
while i < len(self.valid_MSA[0]):
if self.valid_MSA[0][i] != "-":
observed_AAs_removed_gaps[j] = observed_AAs[i]
j += 1
i += 1
else:
i += 1
return observed_AAs_removed_gaps
def calculate_log_score(self, observed_AAs):
log_score_matrix = np.zeros((len(self.valid_MSA[0]), len(ALPHABET) - 1))
i = 0
while i < len(observed_AAs):
j = 0
while j < len(observed_AAs[0]):
if observed_AAs[i][j] == 0:
log_score_matrix[i][j] = -20
else:
log_score_matrix[i][j] = 2 * math.log(observed_AAs[i][j], 2)
j += 1
i += 1
return log_score_matrix
def divide_bg_freq_uniform(self, observed_AAs):
divided_PSSM = np.zeros((len(self.valid_MSA[0]), len(ALPHABET) - 1))
i = 0
while i < len(observed_AAs):
j = 0
while j < len(observed_AAs[0]):
divided_PSSM[i][j] = observed_AAs[i][j] / 0.05
j += 1
i += 1
return divided_PSSM
def divide_bg_freq(self, observed_AAs):
divided_PSSM = np.zeros((len(self.valid_MSA[0]), len(ALPHABET) - 1))
i = 0
while i < len(observed_AAs):
j = 0
while j < len(observed_AAs[0]):
divided_PSSM[i][j] = observed_AAs[i][j] / self.background_freq[j]
j += 1
i += 1
return divided_PSSM
def normalize_matrix(self, observed_AAs, relative_freq):
normalized_matrix = np.zeros((len(self.valid_MSA[0]), len(ALPHABET) - 1))
i = 0
while i < len(observed_AAs):
j = 0
while j < len(observed_AAs[0]):
normalized_matrix[i][j] = observed_AAs[i][j] / relative_freq[i]
j += 1
i += 1
return normalized_matrix
def adjust_freq(self, alpha, beta, relative_freq, relative_pseudocounts):
adjusted_freq = np.array([])
i = 0
while i < len(relative_freq):
if relative_pseudocounts is None:
adjusted_freq = np.append(adjusted_freq, (alpha * relative_freq[i]) / (alpha + beta))
else:
adjusted_frequency = ((alpha * relative_freq[i]) + (beta*relative_pseudocounts[i]))/(alpha + beta)
adjusted_freq = np.append(adjusted_freq, adjusted_frequency)
i += 1
return adjusted_freq
def get_pseudocounts_matrix_uniform(self, observed_AAs):
pseudocounts_matrix = np.zeros((len(self.valid_MSA[0]), len(ALPHABET) - 1))
i = 0
while i < len(pseudocounts_matrix):
j = 0
while j < len(pseudocounts_matrix[0]):
ch = 0
pseudocount = 0
while ch < len(ALPHABET) - 1:
pseudocount += ((observed_AAs[i][ch] / 0.05) * 0.0025)
ch += 1
pseudocounts_matrix[i][j] = pseudocount
j += 1
i += 1
return pseudocounts_matrix
def get_pseudocounts_matrix(self, observed_AAs, bg_matrix):
pseudocounts_matrix = np.zeros((len(self.valid_MSA[0]), len(ALPHABET) - 1))
i = 0
while i < len(pseudocounts_matrix):
j = 0
while j < len(pseudocounts_matrix[0]):
ch = 0
pseudocount = 0
while ch < len(ALPHABET) - 1:
pseudocount += ((observed_AAs[i][ch] / self.background_freq[ch]) * bg_matrix[ch][j])
ch += 1
pseudocounts_matrix[i][j] = pseudocount
j += 1
i += 1
return pseudocounts_matrix
def redistribute_gaps_uniform(self, observed_AAs):
observed_AAs_redistributed = observed_AAs
i = 0
while i < len(observed_AAs_redistributed):
j = 0
while j < len(observed_AAs_redistributed[0]) - 1:
observed_AAs_redistributed[i][j] += (0.05 * observed_AAs_redistributed[i][len(ALPHABET) - 1])
j += 1
i += 1
return observed_AAs_redistributed
def redistribute_gaps(self, observed_AAs):
observed_AAs_redistributed = observed_AAs
i = 0
while i < len(observed_AAs_redistributed):
j = 0
while j < len(observed_AAs_redistributed[0]) - 1:
observed_AAs_redistributed[i][j] += (self.background_freq[j] * observed_AAs_redistributed[i][len(ALPHABET) - 1])
j += 1
i += 1
return observed_AAs_redistributed
def observed_AAs_Gaps_weights(self):
observed_AAs = np.zeros((len(self.valid_MSA[0]), len(ALPHABET)))
weights = self.get_sequence_weights()
i = 0
while i < len(self.valid_MSA[0]):
j = 0
while j < len(self.valid_MSA):
observed_AAs[i][AA_TO_INT[self.valid_MSA[j][i]]] += weights[j]
j += 1
i += 1
return observed_AAs
def observed_AAs_Gaps_noweights(self):
observed_AAs = np.zeros((len(self.valid_MSA[0]), len(ALPHABET)))
i = 0
while i < len(self.valid_MSA[0]):
j = 0
tempStr = ""
while j < len(self.valid_MSA):
tempStr += self.valid_MSA[j][i]
j += 1
unique_AAs = set(tempStr)
for ch in unique_AAs:
observed_AAs[i][AA_TO_INT[ch]] = tempStr.count(ch)
i += 1
return observed_AAs
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.valid_MSA), len(self.valid_MSA[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return str(self.valid_MSA[0]).replace("-", "")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
self.weight_matrix = np.zeros((len(self.valid_MSA[0]), len(self.valid_MSA) + 1))
i = 0
while i < len(self.valid_MSA[0]):
j = 0
tempStr = ""
while j < len(self.valid_MSA):
tempStr += self.valid_MSA[j][i]
j += 1
observedAAs = set(tempStr)
j = 0
while j < len(tempStr):
self.weight_matrix[i][j] = 1 / (tempStr.count(tempStr[j]) * len(observedAAs))
j += 1
self.weight_matrix[i][len(self.valid_MSA)] = len(observedAAs)
i += 1
weights = np.array([])
i = 0
while i < len(self.weight_matrix[0]) - 1:
j = 0
tempSum = 0
while j < len(self.weight_matrix):
if self.weight_matrix[j][len(self.weight_matrix[0]) - 1] != 1:
tempSum += self.weight_matrix[j][i]
j += 1
weights = np.append(weights, tempSum)
i += 1
return weights.astype(np.float64)
    def get_number_of_observations(self):
        """
        Return the estimated number of independent observations in the MSA.
        :return: Estimate of independent observation (dtype=numpy.float64).
        """
        # get_sequence_weights() builds self.weight_matrix as a side effect;
        # run it first if it has not been computed yet
        if not hasattr(self, 'weight_matrix'):
            self.get_sequence_weights()
        num_obs = 0
        i = 0
        while i < len(self.weight_matrix):
            num_obs += self.weight_matrix[i][len(self.weight_matrix[0]) - 1]
            i += 1
        num_obs = num_obs / len(self.valid_MSA[0])
        return np.float64(num_obs)
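

if __name__ == '__main__':
    # Minimal usage sketch (illustrative sequences, not exercise data).
    msa = MSA(['SE-AN', 'SE-AN', 'SEVAN'])
    print(msa.get_size())              # (3, 5)
    print(msa.get_primary_sequence())  # 'SEAN'
    print(msa.get_sequence_weights())
    pssm = msa.get_pssm()              # uniform background frequencies
    print(pssm.shape)                  # (4, 20)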
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.SeqUtils import seq1
import numpy as np
# silence PDBConstructionWarning
import warnings
from Bio.PDB.PDBExceptions import PDBConstructionWarning
warnings.simplefilter('ignore', PDBConstructionWarning)
# Exercise 2: Protein Data Bank
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
def __init__(self, path):
"""
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
"""
# parser object for reading in structure in CIF format
parser = MMCIFParser()
        # Parse the structure once and re-use it in the functions below
        # (the first argument is just an arbitrary structure id)
        self.structure = parser.get_structure('structure', path)
# 3.8 Chains
def get_number_of_chains(self):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
"""
first_model = self.structure[0]
return len(first_model)
def get_residues(self, chain_id):
first_model = self.structure[0]
chain = first_model[chain_id]
only_aa = filter(lambda res: res.get_id()[0] == ' ' and res.get_id()[2] == ' ', chain)
return list(only_aa)
# 3.9 Sequence
def get_sequence(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
"""
one_letter_sequence = ''.join(seq1(res.get_resname()) for res in self.get_residues(chain_id))
return one_letter_sequence
# 3.10 Water molecules
def get_number_of_water_molecules(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
"""
first_model = self.structure[0]
chain = first_model[chain_id]
only_water = list(filter(lambda res: res.get_resname() == 'HOH', chain))
return len(only_water)
# 3.11 C-Alpha distance
def get_ca_distance(self, chain_id_1, index_1, chain_id_2, index_2):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
"""
first_model = self.structure[0]
chain_1 = first_model[chain_id_1]
chain_2 = first_model[chain_id_2]
atom_1 = chain_1[index_1]['CA']
atom_2 = chain_2[index_2]['CA']
ca_distance = atom_1 - atom_2
return int(ca_distance)
# 3.12 Contact Map
def get_contact_map(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
"""
residues = self.get_residues(chain_id)
length = len(residues)
contact_map = np.zeros((length, length), dtype=np.float32)
for x in range(length):
for y in range(x, length):
dist = residues[x]['CA'] - residues[y]['CA'] # self.get_ca_distance(chain_id, x+1, chain_id, y+1)
contact_map[x][y] = dist
contact_map[y][x] = dist
        return contact_map.astype(np.int64)  # return rounded (integer) values (np.int is deprecated)
# 3.13 B-Factors
def get_bfactors(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
"""
length = len(self.get_sequence(chain_id))
b_factors = np.zeros((length,), dtype=np.float32)
# average b-factors of every atom in every residue
residues = self.get_residues(chain_id)
for i in range(len(residues)):
residue = residues[i]
atoms = 0
b_factor_sum = 0
for atom in residue:
atoms += 1
b_factor_sum += atom.get_bfactor()
if atoms == 0:
b_factors[i] = np.nan
else:
avg = b_factor_sum / atoms
b_factors[i] = avg
# normalize: zero mean, unit variance
mean = np.nanmean(b_factors)
b_factors -= mean
var = np.nanvar(b_factors)
std = np.sqrt(var)
b_factors /= std
        return b_factors.astype(np.int64)  # return rounded (integer) values (np.int is deprecated)
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()
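
# Illustrative usage (assumes a local copy of the example structure file):
#
#   parser = PDB_Parser('tests/7ahl.cif')
#   print(parser.get_number_of_chains())  # 7AHL is a heptamer -> 7 chains
#   cm = parser.get_contact_map('A')
#   # the contact map is symmetric with a zero diagonal
#   assert (cm == cm.T).all() and (cm.diagonal() == 0).all()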
<file_sep>import collections
import re
from pathlib import Path
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = "ACDEFGHIKLMNPQRSTVWY"
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
def all_words():
all_3 = []
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
all_3.append(f"{i}{j}{k}")
return all_3
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.seqs = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.seqs.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [s for s in self.seqs if word in s]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
all_3 = all_words()
c = collections.Counter(all_3)
c1 = collections.Counter(all_3)
sum_dif_words = 0
for s in self.seqs:
c2 = collections.Counter(all_3)
update_list = []
for i in range(len(s) - 2):
update_list.append(s[i : i + 3])
c1.update(update_list)
c2.update(update_list)
c2 = c2 - c
sum_dif_words += len(c2)
c1 = c1 - c
avg_word_per_seq = sum_dif_words / len(self.seqs)
sum_word_occuring = 0
for s in self.seqs:
for key in c1:
if key in s:
sum_word_occuring += 1
avg_seq_per_word = sum_word_occuring / len(c1)
return (
len(self.seqs),
len(c1),
int(round(avg_word_per_seq)),
int(round(avg_seq_per_word)),
)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
all_3 = all_words()
over_thr = []
if sequence:
for i in range(len(sequence) - 2):
for word in all_3:
if not word in over_thr:
score = (
self.sub[AA_TO_INT[sequence[i]]][AA_TO_INT[word[0]]]
+ self.sub[AA_TO_INT[sequence[i + 1]]][AA_TO_INT[word[1]]]
+ self.sub[AA_TO_INT[sequence[i + 2]]][AA_TO_INT[word[2]]]
)
if score >= T:
over_thr.append(word)
if pssm is not None:
for i in range(pssm.shape[0] - 2):
for word in all_3:
if not word in over_thr:
score = (
pssm[i][AA_TO_INT[word[0]]]
+ pssm[i + 1][AA_TO_INT[word[1]]]
+ pssm[i + 2][AA_TO_INT[word[2]]]
)
if score >= T:
over_thr.append(word)
return over_thr
def get_words_positions(self, *, sequence=None, pssm=None, T=11):
all_3 = all_words()
over_thr = []
if sequence:
range_len = len(sequence) - 2
else:
range_len = pssm.shape[0] - 2
for i in range(range_len):
for word in all_3:
if not (word, i) in over_thr:
if sequence:
score = (
self.sub[AA_TO_INT[sequence[i]]][AA_TO_INT[word[0]]]
+ self.sub[AA_TO_INT[sequence[i + 1]]][AA_TO_INT[word[1]]]
+ self.sub[AA_TO_INT[sequence[i + 2]]][AA_TO_INT[word[2]]]
)
else:
score = (
pssm[i][AA_TO_INT[word[0]]]
+ pssm[i + 1][AA_TO_INT[word[1]]]
+ pssm[i + 2][AA_TO_INT[word[2]]]
)
if score >= T:
over_thr.append((word, i))
return over_thr
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
result = collections.defaultdict(list)
initials = self.get_words_positions(sequence=query, pssm=pssm, T=T)
for word, occ_q in initials:
targets = blast_db.get_sequences(word)
for target in targets:
                all_occurrences = [m.start() for m in re.finditer(f"(?={word})", target)]
                for occ_t in all_occurrences:
if query:
ending_q = len(query)
max_score = (
self.sub[AA_TO_INT[query[occ_q]]][AA_TO_INT[target[occ_t]]]
+ self.sub[AA_TO_INT[query[occ_q + 1]]][
AA_TO_INT[target[occ_t + 1]]
]
+ self.sub[AA_TO_INT[query[occ_q + 2]]][
AA_TO_INT[target[occ_t + 2]]
]
)
else:
ending_q = pssm.shape[0]
max_score = (
pssm[occ_q][AA_TO_INT[target[occ_t]]]
+ pssm[occ_q + 1][AA_TO_INT[target[occ_t + 1]]]
+ pssm[occ_q + 2][AA_TO_INT[target[occ_t + 2]]]
)
max_score_length = 3
score = max_score
i = occ_q + max_score_length
j = occ_t + max_score_length
# To the right
while i < ending_q and j < len(target):
if query:
score += self.sub[AA_TO_INT[query[i]]][AA_TO_INT[target[j]]]
else:
score += pssm[i][AA_TO_INT[target[j]]]
if score > max_score:
max_score = score
max_score_length = i - occ_q + 1
elif score <= max_score - X:
break
i += 1
j += 1
max_start_q = occ_q
max_start_t = occ_t
max_score_length_left = 0
score = max_score
i = occ_q - 1
j = occ_t - 1
# To the left
while i >= 0 and j >= 0:
if query:
score += self.sub[AA_TO_INT[query[i]]][AA_TO_INT[target[j]]]
else:
score += pssm[i][AA_TO_INT[target[j]]]
if score > max_score:
max_score = score
max_score_length_left = occ_q - i
max_start_q = i
max_start_t = j
elif score <= max_score - X:
break
i -= 1
j -= 1
max_score_length += max_score_length_left
if max_score < S:
continue
if not target in result:
result[target] = set()
result[target].add(
(max_start_q, max_start_t, max_score_length, max_score)
)
        # convert the per-target sets (used only for de-duplication) to lists,
        # matching the docstring contract of lists of HSP tuples
        return {target: list(hsps) for target, hsps in result.items()}
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # NOTE: the two-hit extension is not implemented yet; return the
        # placeholder result so the method is at least callable
        d = dict()
        d["SEQWENCE"] = [(1, 2, 4, 13)]
        return d
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
import numpy
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
aa_store = {
"ALA": "A",
"ARG": "R",
"ASN": "N",
"ASP": "D",
"CYS": "C",
"GLU": "E",
"GLN": "Q",
"GLY": "G",
"HIS": "H",
"ILE": "I",
"LEU": "L",
"LYS": "K",
"MET": "M",
"PHE": "F",
"PRO": "P",
"SER": "S",
"THR": "T",
"TRP": "W",
"TYR": "Y",
"VAL": "V"
}
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
def calc_residue_dist(residue_one, residue_two):
diff_vector = residue_one["CA"].coord - residue_two["CA"].coord
return int(numpy.sqrt(numpy.sum(diff_vector * diff_vector)))
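
# The helper above is the plain Euclidean distance between two C-alpha
# coordinates; an equivalent one-liner (illustrative) would be
#   int(numpy.linalg.norm(residue_one["CA"].coord - residue_two["CA"].coord))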
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
        # Parse the structure once and re-use it in the functions below
        # (pass a plain string as the structure id, not the parser object)
        self.structure = self.CIF_PARSER.get_structure('structure', path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
        first_model = self.structure.child_list[0]
        return len(first_model.child_dict)
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
sequence = []
model = self.structure.child_list[0]
new_sequence = model.child_dict[chain_id]
for residue in new_sequence.child_list:
if residue.resname in aa_store.keys():
sequence.append(aa_store[residue.resname])
return ''.join(sequence)
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
        water = 'HOH'
        count = 0
        model = self.structure.child_list[0]
        chain = model.child_dict[chain_id]
        for residue in chain.child_list:
            if residue.resname == water:
                count += 1
        return count
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
water = 'HOH'
model = self.structure.child_list[0]
chain_1_model = model.child_dict[chain_id_1]
chain_1_idx = chain_1_model.child_list[index_1]
chain_2_model = model.child_dict[chain_id_2]
chain_2_idx = chain_2_model.child_list[index_2]
if chain_1_idx.resname != water and chain_2_idx.resname != water:
diff_vector = chain_1_idx["CA"].coord - chain_2_idx["CA"].coord
return int(numpy.sqrt(numpy.sum(diff_vector * diff_vector)))
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
water = 'HOH'
model = self.structure.child_list[0]
chain_1_model = model.child_dict[chain_id]
chain_1_idx = chain_1_model.child_list
contact_map = np.zeros((len(chain_1_idx), len(chain_1_idx)), dtype=np.float32 )
        for row, residue_one in enumerate(chain_1_idx):
            if residue_one.resname == water:
                continue
            for col, residue_two in enumerate(chain_1_idx):
                if residue_two.resname == water:
                    # skip water columns instead of breaking, so residues that
                    # come after a water molecule are still filled in
                    continue
                contact_map[row, col] = calc_residue_dist(residue_one, residue_two)
contact_map = contact_map[~np.all(contact_map == 0, axis=1)]
contact_map = np.delete(contact_map, np.where(~contact_map.any(axis=0))[0], axis=1)
        return contact_map.astype(np.int64) # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
        # TODO: not implemented yet -- placeholder return value
        length = 10
        b_factors = np.array(length, dtype=np.float32)
        return b_factors.astype(np.int64)  # return rounded (integer) values
def main():
print('PDB parser class.')
    parser = PDB_Parser('tests/7ahl.cif')
    distance = parser.get_ca_distance('A', 121, 'E', 120)
    return distance
if __name__ == '__main__':
main()<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        total = 0
        for x in self.__sequences:
            total += len(x)
        return float(total) / self.get_counts()
def read_fasta(self, path):
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.__sequences.append(seq.replace('*', ''))
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(seq.replace('*', ''))
def get_abs_frequencies(self):
# return number of occurrences not normalized by length
total_string = ''.join(self.__sequences)
counted = Counter(total_string)
return counted
def get_av_frequencies(self):
# return number of occurrences normalized by length
total_string = ''.join(self.__sequences)
counted = Counter(total_string)
for key in counted:
counted[key] /= float(len(total_string))
return counted<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
self.gap_char = '-'
self.neg_inf_replace = -20
if not self.sequences_is_sane_check():
raise TypeError
# Sequence is valid
self.x_size = len(self.sequences[0])
self.y_size = len(self.sequences)
self.seq_weight = None
self.pssm_original = None
def sequences_is_sane_check(self):
# Seq count check
if len(self.sequences) < 1:
return False
# Seq length check
        first_len = len(self.sequences[0])
        for seq in self.sequences:
            if len(seq) != first_len:
return False
        for seq in self.sequences:
            for single_amino in seq:
                if single_amino not in ALPHABET:
return False
return True
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
#Calculate sequence weights
if self.seq_weight is None:
self.seq_weight = self.get_sequence_weights()
        # Count observed amino acids and gaps (optionally sequence-weighted)
        if use_sequence_weights:
self.pssm_original = self.count_occurrences_to_2d_with_gaps(self.seq_weight)
else:
self.pssm_original = self.count_occurrences_to_2d_with_gaps(None)
pssm = self.pssm_original
bg_vector = None
if bg_matrix is not None:
bg_vector = np.zeros(20)
for y in range(20):
bg_vector[y] = np.sum(bg_matrix[y])
# Redistribute gaps according to background frequencies
if redistribute_gaps:
pssm = self.redistribute_gaps(pssm, bg_matrix)
        pssm = self.remove_gap_column(pssm)  # drop the gap column before normalization
# Add weighted pseudocounts TODO: implement
# Normalize to relative frequencies
pssm = self.normalize_by_row_sum(pssm)
# Divide by background frequencies
pssm = self.div_with_bg_freq(pssm, bg_vector)
# Calculate Log-Score
pssm = self.apply_log_with_bg_freq(pssm, bg_vector)
pssm = self.correct_for_neg_inf_entries(pssm)
# Remove rows corresponding to gaps in primary sequence
pssm = self.remove_primary_gap_columns_from_matrix(pssm)
        # Round to the nearest integer and cast
        return np.rint(pssm).astype(np.int64)
def correct_for_neg_inf_entries(self, matrix):
for x in range(matrix.shape[0]):
for y in range(20):
if np.isneginf(matrix[x][y]):
matrix[x][y] = self.neg_inf_replace
return matrix
def count_occurrences_to_2d_with_gaps(self, weights):
pssm = np.zeros((self.x_size, len(ALPHABET)))
for x in range(self.x_size):
for y in range(self.y_size):
weight_mul = 1
if weights is not None:
weight_mul = weights[y]
pos = AA_TO_INT[self.sequences[y][x]]
pssm[x][pos] += 1 * weight_mul
return pssm
def remove_gap_column(self, matrix):
return np.delete(matrix, GAP_INDEX, 1)
def remove_primary_gap_columns_from_matrix(self, matrix):
pssm = np.zeros((len(self.get_primary_sequence()), 20))
pssm_counter = 0
for x in range(matrix.shape[0]):
if self.sequences[0][x] != self.gap_char:
pssm[pssm_counter] = matrix[x]
pssm_counter += 1
return pssm
def redistribute_gaps(self, matrix, bg_matrix):
for y in range(matrix.shape[0]):
from_amino = self.sequences[0][y]
if from_amino != ALPHABET[GAP_INDEX]:
gap_count = matrix[y][GAP_INDEX]
for x in range(20): # well ok, just use static 20
matrix[y][x] += self.get_bg_freq_for_amino_substitution(bg_matrix, from_amino, INT_TO_AA[x]) * gap_count
return matrix
def get_bg_freq_for_amino_substitution(self, bg_matrix, from_amino, to_amino):
if bg_matrix is None:
return 0.05
else:
return bg_matrix[AA_TO_INT[from_amino]][AA_TO_INT[to_amino]]
def div_with_bg_freq(self, matrix, bg_vector):
        if bg_vector is None:  # avoid stepping through the matrix manually; NumPy does this faster
return matrix / 0.05
else:
for y in range(matrix.shape[0]):
for x in range(20):
matrix[y][x] = matrix[y][x] / bg_vector[x]
return matrix
    def apply_log_with_bg_freq(self, matrix, bg_vector):
        # div_with_bg_freq() has already divided by the background
        # frequencies, so only the 2*log2 score remains to be computed here
        # (dividing by bg_vector again would apply the background twice)
        return 2 * np.log2(matrix)
    def get_position_of_amino(self, amino):
        # self.protein_list never existed; AA_TO_INT provides the index
        return AA_TO_INT[amino]
def normalize_by_row_sum(self, matrix):
for x in range(len(matrix)):
matrix[x] = matrix[x]/matrix[x].sum()
        return matrix  # not really needed, but let's stay consistent
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace(self.gap_char, '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.y_size)
for x in range(self.x_size):
count_dict = {}
for y in range(self.y_size):
key = self.sequences[y][x]
count_dict[key] = count_dict.get(key, 0) + 1
if len(count_dict) > 1:
count_dict = {k: 1 / (v * len(count_dict)) for k, v in count_dict.items()}
for y in range(self.y_size):
weights[y] += count_dict[self.sequences[y][x]]
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
counter = 0
for x in range(self.x_size):
single_unique_dict = {}
for y in range(self.y_size):
single_unique_dict[self.sequences[y][x]] = 0
            counter += len(single_unique_dict)
        return np.float64(counter / self.x_size)
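

if __name__ == '__main__':
    # Minimal sketch of the position-based sequence weights (illustrative
    # sequences, not exercise data). Column 2 ('-', '-', 'V') is the only
    # column with more than one symbol: r=2 observed symbols, so both gap
    # rows get 1/(2*2) = 0.25 and the 'V' row gets 1/(1*2) = 0.5.
    msa = MSA(['SE-AN', 'SE-AN', 'SEVAN'])
    print(msa.get_sequence_weights())  # [0.25 0.25 0.5]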
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = {}
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences.keys())
def get_average_length(self):
        total = 0
        for seq in self.__sequences.values():
            total += len(seq)
        return total / len(self.__sequences)
    def read_fasta(self, path):
        def reader(fp):
            head = None
            seq = []
            for line in fp:
                line = line.rstrip()
                if line.startswith('>'):
                    if head:
                        yield (head, ''.join(seq))
                    head = line
                    seq = []
                else:
                    seq.append(line)
            if head:
                yield (head, ''.join(seq))

        with open(path) as fasta_file:
            for head, seq in reader(fasta_file):
                if seq[-1] == "*":
                    seq = seq[:-1]
                self.__sequences[head] = seq
    def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
        resf = Counter()
        for seq in self.__sequences.values():
            resf.update(seq)
        return dict(resf)
    def get_av_frequencies(self):
        # return number of occurrences normalized by length
        abs_freq = self.get_abs_frequencies()
        total = sum(abs_freq.values())
        return {aa: count / total for aa, count in abs_freq.items()}
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
        return len(self.__sequences)
    def get_total_length(self):
        return sum(len(body) for (header, body) in self.__sequences)
def get_average_length(self):
return self.get_total_length() / float(self.get_counts())
    def aa_dist(self):
        # count amino acids across all stored (header, body) records
        cnt = Counter()
        for (header, body) in self.__sequences:
            for aa in body:
                cnt[aa] += 1
        return cnt
def read_fasta(self, path):
this_header = ''
this_body = ''
with open(path, 'r') as f:
            for line in f:
                if line[0] == '>':
                    this_header = line.rstrip('\n')
                elif line == '\n':
                    self.__sequences.append((this_header, this_body))
                    this_header = ''
                    this_body = ''
                else:
                    this_body = this_body + line.rstrip('\n').strip('* ')
if this_header != '':
self.__sequences.append((this_header, this_body))
def get_abs_frequencies(self):
cnt = Counter()
for body in [body for (header, body) in self.__sequences]:
for aa in body:
cnt[aa] += 1
return cnt
def get_av_frequencies(self):
cnt = self.get_abs_frequencies()
total_length = self.get_total_length()
for key, value in cnt.items():
cnt[key] = value / float(total_length)
return cnt
if __name__ == '__main__':
    dist = AADist('tests/tests.fasta')
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_abs_frequencies())
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.PDB import *
import numpy as np
import os
import sys
from Bio.PDB.Polypeptide import *
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
self.structure_id=(os.path.basename(path)).upper().split('.')[0]
self.structure=self.CIF_PARSER.get_structure(self.structure_id, path)
# 3.8 Chains
def get_number_of_chains( self ):
n_chains = len(list(self.structure.get_chains()))
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
standard_aa_names = ["ALA", "CYS", "ASP", "GLU", "PHE", "GLY", "HIS", "ILE", "LYS",
"LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL",
"TRP", "TYR"]
model = self.structure[0]
chain=model[chain_id]
sequence=""
for residue in chain:
name=residue.get_resname()
if name in standard_aa_names:
sequence=sequence+three_to_one(name)
return sequence
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
model=self.structure[0]
chain=model[chain_id]
n_waters = 0
for residue in chain.get_list():
residue_id=residue.get_id()
hetfield=residue_id[0]
if hetfield[0]=="W":
n_waters=n_waters+1
return n_waters
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
model=self.structure[0]
chain1=model[chain_id_1]
chain2=model[chain_id_2]
residue1=chain1[index_1]
residue2=chain2[index_2]
coor1=residue1["CA"].get_coord()
coor2=residue2["CA"].get_coord()
ca_distance = np.linalg.norm(coor1-coor2)
return int( ca_distance )
# removes water residues etc.
def aa_residues(self,chain):
aa_only = []
for i in chain:
if i.get_resname() in standard_aa_names:
aa_only.append(i)
return aa_only
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
model=self.structure[0]
chain=model[chain_id]
chain_of_only_aa=self.aa_residues(chain)
length = len(chain_of_only_aa)
contact_map = np.zeros( (length,length), dtype=np.float32 )
for row, residue1 in enumerate(chain_of_only_aa) :
for col, residue2 in enumerate(chain_of_only_aa) :
distance=np.linalg.norm(residue1["CA"].get_coord()-residue2["CA"].get_coord())
contact_map[row, col] = distance
return contact_map.astype( np.int64 ) # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
        model = self.structure[0]
        chain = model[chain_id]
        chain_of_only_aa = self.aa_residues(chain)
        b_fact = []
        for residue in chain_of_only_aa:
            atom_b_factor = []
            for atom in residue:
                atom_b_factor.append(atom.get_bfactor())
            # mean B-factor over all atoms of the residue
            b_fact.append(np.nanmean(atom_b_factor))
        b_factors = np.asarray(b_fact)
        # standard scores: zero mean, unit variance (nan-aware)
        b_factors = (b_factors - np.nanmean(b_factors)) / np.nanstd(b_factors)
        return b_factors.astype(np.int64)  # return rounded (integer) values
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()<file_sep>#!/bin/bash
#
# TODO: Open results in browser directly
# TODO: Run mossum automatically
# TODO: Nicer folder structure
#
# Exercise folders & template folder names must be '1.zip', '2.zip', '3.zip'...
#Eliminate spaces in folder names
find -name "* *" -print0 | sort -rz | \
while read -d $'\0' f; do mv -v "$f" "$(dirname "$f")/$(basename "${f// /-}")"; done
# Unzip each exercise folder separately
find -name '*.zip' -exec sh -c 'unzip -d "${1%.*}" "$1"' _ {} \;
# Collect exercise folders
dirs=($(find . -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
# Unzip the student repos for each exercise separately
for d in "${dirs[@]}"; do
cd "${d}/repos/zippedRepos"
#Eliminate spaces
find -name "* *" -print0 | sort -rz | \
while read -d $'\0' f; do mv -v "$f" "$(dirname "$f")/$(basename "${f// /-}")"; done
mkdir -p ../../collected_files
find -name '*.zip' -exec sh -c 'unzip -d "${1%.*}" "$1"' _ {} \;
find -name "* *" -print0 | sort -rz | \
while read -d $'\0' f; do mv -v "$f" "$(dirname "$f")/$(basename "${f// /-}")"; done
cd ../../..
done
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.start_ind = 0
self.start_m = 0
self.stop_ind = 0
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.align_tuple = ("", "")
self.alignment = ""
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int64)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.fill_score_matrix()
        m, n = self.get_best_score_index()
self.stop_ind = n
self.make_alignments(m, n, "")
self.align_tuple = (self.string1[self.start_ind:self.stop_ind], self.alignment)
def fill_score_matrix(self):
for m in range(1, self.score_matrix.shape[0]):
for n in range(1, self.score_matrix.shape[1]):
pos1 = self.score_matrix[m-1][n-1] + self.substitution_matrix[self.string2[m-1]][self.string1[n-1]]
pos2 = self.score_matrix[m-1][n] + self.gap_penalty
pos3 = self.score_matrix[m][n-1] + self.gap_penalty
self.score_matrix[m][n] = max(pos1, pos2, pos3, 0)
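
    # Smith-Waterman recurrence implemented above (illustrative notation):
    #   S[i][j] = max(S[i-1][j-1] + sub(b_i, a_j),  # match / mismatch
    #                 S[i-1][j]   + gap_penalty,    # gap in string1
    #                 S[i][j-1]   + gap_penalty,    # gap in string2
    #                 0)                            # start a new local alignment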
def make_alignments(self, m, n, alignment):
if self.score_matrix[m][n] == 0:
self.alignment = alignment[::-1]
self.start_ind = n
self.start_m = m
#print(self.alignment)
return alignment[::-1]
else:
pos1 = self.score_matrix[m - 1][n - 1] + self.substitution_matrix[self.string2[m - 1]][self.string1[n - 1]]
pos2 = self.score_matrix[m - 1][n] + self.gap_penalty
pos3 = self.score_matrix[m][n - 1] + self.gap_penalty
pos1 = max(pos1, 0)
pos2 = max(pos2, 0)
pos3 = max(pos3, 0)
if self.score_matrix[m][n] == pos1:
#substitution
alignment2 = alignment + self.string2[m - 1]
self.make_alignments(m-1, n-1, alignment2)
if self.score_matrix[m][n] == pos2:
#moved right
alignment2 = alignment + "-"
self.make_alignments(m - 1, n, alignment2)
if self.score_matrix[m][n] == pos3:
#moved down
alignment2 = alignment + "-"
self.make_alignments(m, n - 1, alignment2)
def get_best_score_index(self):
best = 0
index_foo = 0
index_bar = 0
for foo in range(self.score_matrix.shape[0]):
for bar in range(self.score_matrix.shape[1]):
next_score = self.score_matrix[foo][bar]
if next_score > best:
index_foo = foo
index_bar = bar
best = next_score
return (index_foo, index_bar)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
if not self.alignment == "":
return True
else:
return False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if not self.has_alignment():
return ("","")
return (self.string1[self.start_ind:self.stop_ind], self.alignment)
def check_alignment(self, seq, residue_index, new_residue_index):
for foo in range(len(self.alignment)):
if foo == len(self.alignment) - 1:
break
if foo < new_residue_index + 1:
if self.alignment[foo] == "-":
new_residue_index += 1
if new_residue_index > len(self.alignment)-1:
return False
if self.alignment[new_residue_index] == seq[residue_index]:
return True
else:
return False
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if not self.has_alignment():
return False
if string_number == 1:
new_residue_index = residue_index - self.start_ind
if len(self.align_tuple[0]) == len(self.align_tuple[1]):
if new_residue_index > (len(self.alignment) - 1):
return False
if new_residue_index >= 0 and self.align_tuple[0][new_residue_index] == self.align_tuple[1][new_residue_index]:
return True
return self.check_alignment(self.string1, residue_index, new_residue_index)
else:
new_residue_index = residue_index - self.start_m
return self.check_alignment(self.string2, residue_index, new_residue_index)
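

if __name__ == '__main__':
    # Illustrative usage with a toy match/mismatch matrix over a small
    # alphabet (the exercise itself uses real substitution matrices, e.g. BLOSUM).
    alphabet = 'ACGT'
    toy_matrix = {a: {b: (3 if a == b else -1) for b in alphabet} for a in alphabet}
    la = LocalAlignment('ACGTC', 'AGTC', -2, toy_matrix)
    print(la.has_alignment())
    print(la.get_alignment())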
<file_sep>##############
# Exercise 2.6
##############
import os
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def aa_dist(self, aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
acc = 0
for x in self.__sequences:
acc = acc + (len(x.replace('*', '')))
return acc/self.get_counts()
def read_fasta(self, path):
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.__sequences.append(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(seq)
print(self.__sequences)
def get_abs_frequencies(self):
freq = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
for x in self.__sequences:
for y in x.replace('*', ''):
freq[y] = freq[y] + 1
return freq
def get_av_frequencies(self):
freq = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
acc = 0
for x,y in self.get_abs_frequencies().items():
acc += y
for k, v in self.get_abs_frequencies().items():
freq[k] = v/(acc)
return freq
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return sum(len(s) for s in self.__sequences) / self.get_counts()
def read_fasta(self, path):
with open(path) as fasta_file:
sequence = ''
for line in fasta_file:
if not (line.startswith('>') or line.startswith(';')):
sequence += line.strip().replace('*', '')
else:
if sequence != '':
self.__sequences.append(sequence)
sequence = ''
if sequence != '':
self.__sequences.append(sequence)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
return Counter(''.join(self.__sequences))
def get_av_frequencies(self):
        # return number of occurrences normalized by length
length = sum(len(s) for s in self.__sequences)
counter = self.get_abs_frequencies()
for key in counter:
counter[key] /= length
return counter
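# Usage sketch (hypothetical FASTA path; the returned frequencies sum to ~1.0):
#   dist = AADist("tests/tests.fasta")
#   dist.get_counts(), dist.get_average_length(), dist.get_av_frequencies()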
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
print(self.__sequences)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
avg_len = 0
for seq in self.__sequences:
avg_len += len(seq)
avg_len = avg_len/self.get_counts()
return avg_len
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
if seq[-1] == "*":
seq = seq[:-1]
self.__sequences.append(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
if seq[-1] == "*":
seq = seq[:-1]
self.__sequences.append(seq)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
count = Counter()
for seq in self.__sequences:
count.update(seq)
return count
def get_av_frequencies(self):
        # return number of occurrences normalized by length
count = self.get_abs_frequencies()
for key in count:
count[key]/=self.get_average_length()*len(self.__sequences)
return count
if __name__ == '__main__':
    A = AADist('tests/tests.fasta')
    print(A.get_av_frequencies())
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
total_length = self.get_total_length()
return total_length/self.get_counts()
def get_total_length(self):
total_length = 0
for seq in self.__sequences:
print(seq)
total_length += len(seq[1])
return total_length
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
header = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";") or line.startswith('\n'):
if sequence_started:
self.__sequences.append((header, seq))
seq = ""
header = ""
sequence_started = False
else:
header += line.strip()
else:
sequence_started = True
seq += line.strip().strip('*')
self.__sequences.append((header, seq))
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
all_aa = {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'Q': 0, 'E': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0}
for seq in self.__sequences:
for aa in seq[1]:
all_aa[aa] += 1
return all_aa
def get_av_frequencies(self):
        # return number of occurrences normalized by length
all_aa_total = self.get_abs_frequencies()
total_length = self.get_total_length()
for aa in all_aa_total.keys():
all_aa_total[aa] = all_aa_total[aa]/total_length
return all_aa_total
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
lengths = [len(x) for x in self.__sequences]
test = sum(lengths)/len(lengths)
return test
def read_fasta(self, path):
with open(path, 'r') as f:
seqs = []
headers = []
results = []
header_finished = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if header_finished:
results.append(''.join(seqs))
seqs = []
else:
headers.append(line[1:].strip())
else:
header_finished = True
seqs.append(line.strip())
results.append(''.join(seqs))
for i, res in enumerate(results):
results[i] = res.replace('*', '')
self.__sequences = results
return results
def get_abs_frequencies(self):
counted = Counter("".join(self.__sequences))
return counted
def get_av_frequencies(self):
counted = Counter("".join(self.__sequences))
for key in counted:
counted[key] /= len("".join(self.__sequences))
return counted
if __name__ == '__main__':
t = AADist('FASTA.txt')
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa.lower() in ['r', 'h', 'k']
def isNegativelyCharged(aa):
return aa.lower() in ['d', 'e']
def isHydrophobic(aa):
return aa.lower() in ['a', 'v', 'i', 'l', 'f', 'w', 'y', 'm']
def isAromatic(aa):
return aa.lower() in ['f', 'w', 'y', 'h']
def isPolar(aa):
return aa.lower() in ['n', 'q', 's', 't', 'y', 'r', 'd', 'e', 'h','k']
def isProline(aa):
return aa.lower() == 'p'
def containsSulfur(aa):
return aa.lower() in ['c', 'm']
def isAcid(aa):
return aa.lower() in ['d', 'e']
def isBasic(aa):
return aa.lower() in ['r', 'h', 'k']
'''
isCharged
• isPositivelyCharged
• isNegativelyCharged
• isHydrophobic
• isAromatic
• isPolar
• isProline
• containsSulfur
• isAcid
• isBasic
'''<file_sep>import numpy as np
import sys
from pathlib import Path
from collections import defaultdict
import time
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class DatabaseObj:
seq = ""
def __init__(self, seq):
if seq is not None:
self.seq = seq
else:
self.seq = ""
    def contains(self, word):
        return word in self.seq
def all_indices(self, sub, offset=0):
list_index = []
i = self.seq.find(sub, offset)
while i >= 0:
list_index.append(i)
i = self.seq.find(sub, i + 1)
return list_index
class Database:
    def __init__(self, word_size):
        # instance attributes (class-level containers would be shared between objects)
        self.sequences = []
        self.word_size = word_size
        # maps word -> {sequence index -> positions of the word in that sequence}
        self.seq_word_dict = defaultdict(lambda: defaultdict(list))
def append(self, seq):
self.sequences.append(DatabaseObj(seq))
seq_index = len(self.sequences) - 1
for i in range(len(seq) - (self.word_size - 1)):
self.seq_word_dict[seq[i:i + self.word_size]][seq_index].append(i)
pass
def get_containing(self, word):
seq_dict = self.seq_word_dict[word]
result = []
for x in seq_dict:
result.append(self.sequences[x].seq)
return result
def count_distinct_words_in_seq(self):
sum = 0
for single_dict in self.seq_word_dict.values():
sum += len(single_dict)
return sum
def db_stats(self):
distinct_count = self.count_distinct_words_in_seq()
result = (len(self.sequences), len(self.seq_word_dict),
int(round(distinct_count/len(self.sequences))),
int(round(distinct_count/len(self.seq_word_dict))))
return result
    def current_millis(self):
        # return the timestamp itself rather than a lambda that would produce it
        return int(round(time.time() * 1000))
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db = Database(3)
pass
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return self.db.get_containing(word)
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
return self.db.db_stats()
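# Usage sketch (illustrative sequences, not from the exercise data):
#   blast_db = BlastDb()
#   blast_db.add_sequence("MGPRARPAFL")
#   blast_db.add_sequence("MAGPRLWMVA")
#   blast_db.get_sequences("GPR")  # -> both sequences, since each contains "GPR"
#   blast_db.get_db_stats()        # -> (#sequences, #words, words/seq, seqs/word)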
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix = substitution_matrix
self.word_size = 3
pass
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
result = {}
if sequence is not None:
unique_input_words = self.get_unique_words(sequence)
for single_word in unique_input_words:
subs = defaultdict(list)
base_score = self.get_score_three_word(single_word, single_word)
for i in range(len(single_word)):
subs[i] = self.get_substitutes_for(single_word[i], base_score - T)
found_sub_candidate = self.build_word_from_dict(subs)
# check if new words all have high enough score
for candidate in found_sub_candidate:
if self.get_score_three_word(single_word, candidate) >= T:
result[candidate] = 1
elif pssm is not None:
            row_chars = {}  # renamed to avoid shadowing the built-in 'dict'
            for i in range(len(pssm)):
                row_chars[i] = self.get_worthy_char_from_row(pssm[i])
            for i in range(len(pssm) - (self.word_size - 1)):
                for possible_candidate in self.build_word_from_dict(row_chars, i):
                    if self.get_pssm_score_for_word(pssm, i, possible_candidate) >= T:
                        result[possible_candidate] = 1
        return list(result.keys())  # the docstring promises a list, not a dict view
def get_unique_words(self, seq):
dict = {}
for i in range(len(seq) - (self.word_size - 1)):
dict[seq[i:i + self.word_size]] = 1
return dict.keys()
def get_score_three_word(self, from_word, to_word):
return self.sub_matrix[AA_TO_INT[from_word[0]]][AA_TO_INT[to_word[0]]] +\
self.sub_matrix[AA_TO_INT[from_word[1]]][AA_TO_INT[to_word[1]]] +\
self.sub_matrix[AA_TO_INT[from_word[2]]][AA_TO_INT[to_word[2]]]
def get_substitutes_for(self, character, threshold):
row = self.sub_matrix[AA_TO_INT[character]]
own_sub = row[AA_TO_INT[character]]
result = []
for i in range(len(row)):
if own_sub - row[i] <= threshold:
result.append(INT_TO_AA[i])
return result
def build_word_from_dict(self, dict, start=0):
result = [""]
#for i in range(len(dict)):
for i in range(start, start + self.word_size):
new_result = []
for part in result:
for new_char in dict[i]:
new_result.append(part + new_char)
result = new_result
return result
def get_worthy_word_from_pssm(self, pssm):
pass
    def get_worthy_char_from_row(self, row):
        result = []
        threshold = -5  # renamed from 'min' to avoid shadowing the built-in
        for i in range(len(row)):
            if row[i] >= threshold:
                result.append(INT_TO_AA[i])
        return result
def get_pssm_score(self, pssm, i, char):
return pssm[i][AA_TO_INT[char]]
def get_pssm_score_for_word(self, pssm, start, str):
counter = 0
sum = 0
for char in str:
sum += self.get_pssm_score(pssm, start + counter, char)
counter += 1
return sum
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder result
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder result
        return d
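# Usage sketch for word generation (illustrative; 'blosum62' stands in for a real
# 20x20 substitution matrix indexed via AA_TO_INT):
#   blast = Blast(blosum62)
#   words = blast.get_words(sequence="MGPRARPAFL", T=11)
#   # every returned 3-mer scores >= T against at least one 3-mer of the query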
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from typing import List
from Bio.PDB import Chain, Residue, Atom
from Bio.PDB.MMCIFParser import (
MMCIFParser,
) # Tip: This module might be useful for parsing...
import numpy as np
from Bio import SeqUtils
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__(self, path):
"""
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
"""
self.structure = PDB_Parser.CIF_PARSER.get_structure(
filename=path, structure_id="7AHL"
) # Parse the structure once and re-use it in the functions below
# 3.8 Chains
def get_number_of_chains(self):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
"""
return len(list(self.structure.get_chains()))
# 3.9 Sequence
def get_sequence(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
"""
sequences = list(self.structure.get_chains())
for i in sequences:
if i.id == chain_id:
ret = ""
for j in i.get_list():
ret += SeqUtils.seq1(j.resname)
return ret.replace("X", "")
return None
# 3.10 Water molecules
def get_number_of_water_molecules(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
"""
number_of_water_molecules = 0
for model in self.structure:
chain = model[chain_id]
hetero_flag_list = list(map(lambda x: x.id[0], chain))
number_of_water_molecules += hetero_flag_list.count("W")
return number_of_water_molecules
# 3.11 C-Alpha distance
def get_ca_distance(self, chain_id_1, index_1, chain_id_2, index_2):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
"""
return int(
self.structure[0][chain_id_1][index_1]["CA"]
- self.structure[0][chain_id_2][index_2]["CA"]
)
    @staticmethod
    def _get_valid_residue_indices(chain: Chain):
        # Note: Biopython chains are 1-indexed for __getitem__. Rather than probing
        # indices, collect the non-water residues directly; callers index into the
        # returned list and use residue.id[1] when the original PDB index is needed.
        return [residue for residue in chain if residue.id[0] != "W"]
# 3.12 Contact Map
def get_contact_map(self, chain_id):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
"""
def get_distance_of_residues_with_indices(_idx, _jdx):
return self.get_ca_distance(
chain_id,
valid_residues_idx[_idx].id[1],
chain_id,
valid_residues_idx[_jdx].id[1],
)
chain = self.structure[0][chain_id]
valid_residues_idx = self._get_valid_residue_indices(chain)
number_of_valid_residues = len(valid_residues_idx)
        contact_map = np.fromfunction(
            np.vectorize(get_distance_of_residues_with_indices),
            shape=(number_of_valid_residues, number_of_valid_residues),
            dtype=int,
        )
return contact_map
# 3.13 B-Factors
def get_bfactors(self, chain_id: str):
"""
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
"""
def b_factor_residue(residue: Residue):
atoms: List[Atom] = list(residue.get_atoms())
atoms_bfactors: List[float] = list(
map(lambda atom: atom.get_bfactor(), atoms)
)
return np.mean(atoms_bfactors)
chain: Chain = self.structure[0][chain_id]
valid_residues_idx = self._get_valid_residue_indices(chain)
        b_values: np.ndarray = np.fromfunction(
            np.vectorize(lambda idx: b_factor_residue(valid_residues_idx[idx])),
            shape=(len(valid_residues_idx),),
            dtype=int,
        )
        # use nan-aware statistics, since unresolved residues may carry np.nan values
        b_values_normalized: np.ndarray = (b_values - np.nanmean(b_values)) / np.nanstd(
            b_values
        )
        return b_values_normalized.astype(dtype=int)
def main():
print("PDB parser class.")
return None
if __name__ == "__main__":
main()
<file_sep>import numpy as np
from itertools import chain
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1 # cols
self.string2 = string2 # rows
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
(row,col) = self._max_score_index()
self.alignments = list(self._backtrack("", "", row, col))
def _max_score_index(self):
return np.unravel_index(self.score_matrix.argmax(), self.score_matrix.shape)
def _score(self, row, col):
c1 = self.string1[col-1]
c2 = self.string2[row-1]
v1 = self.score_matrix[row-1][col-1] + self.substitution_matrix[c1][c2]
v2 = self.score_matrix[row-1][col] + self.gap_penalty # vert move; gap in col string -> string1
v3 = self.score_matrix[row][col-1] + self.gap_penalty # hori move; gap in row string -> string2
return (v1, v2, v3, 0)
    def _backtrack(self, string1, string2, row, col):
        value = self.score_matrix[row][col]
        if row < 1 or col < 1 or value == 0:  # a local alignment ends at a zero score
            return [(string1, string2)]
        char1 = self.string1[col-1]
        char2 = self.string2[row-1]
        scores = self._score(row, col)
        result = []
        if scores[0] == value:
            # diagonal move: align char1 with char2 (match or substitution)
            result.append(
                self._backtrack(char1+string1, char2+string2, row-1, col-1))
        if scores[1] == value:
            # vertical move: gap in string1
            result.append(
                self._backtrack('-'+string1, char2+string2, row-1, col))
        if scores[2] == value:
            # horizontal move: gap in string2
            result.append(
                self._backtrack(char1+string1, '-'+string2, row, col-1))
        return chain.from_iterable(result)
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for (row,col), _ in np.ndenumerate(self.score_matrix):
if row == 0 or col == 0:
continue
else:
self.score_matrix[row][col] = max(self._score(row, col))
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return len(self.alignments[0][0]) > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignments[0]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
(row,col) = self._max_score_index() # str2, str1
if string_number == 1:
alignment = self.alignments[0][0]
axis = col
elif string_number == 2:
alignment = self.alignments[0][1]
axis = row
else:
raise Exception("string_index must be either 1 or 2")
length = len(alignment.replace('-', ''))
start_idx = axis - length
end_idx = axis-1
in_bounds = residue_index >= start_idx and residue_index <= end_idx
a_idx = residue_index - start_idx
not_skipped = False if a_idx >= len(alignment) else alignment[a_idx] != '-'
return bool(in_bounds and not_skipped)
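if __name__ == "__main__":
    # Minimal demo (hypothetical scoring: +3 match, -1 mismatch, gap penalty -2)
    scores = {a: {b: (3 if a == b else -1) for b in "ACGT"} for a in "ACGT"}
    la = LocalAlignment("TACGT", "ACG", -2, scores)
    print(la.has_alignment(), la.get_alignment())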
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import re
start = "ATG"
stop = ["TAA", "TAG", "TGA"]
codon_dict = dict()
def complementary(input):
m = dict({
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
})
return "".join([m[letter] for letter in input.upper()])
def initDict():
all = ['G', 'T', 'A', 'C']
# G
codon_dict.update({'GG'+c: 'G' for c in all})
codon_dict.update({'GC'+c: 'A' for c in all})
codon_dict.update({'GT'+c: 'V' for c in all})
codon_dict.update({'GA'+c: 'E' for c in ['G', 'A']})
codon_dict.update({'GA'+c: 'D' for c in ['C', 'T']})
# C
codon_dict.update({'CT'+c: 'L' for c in all})
codon_dict.update({'CC'+c: 'P' for c in all})
codon_dict.update({'CG'+c: 'R' for c in all})
codon_dict.update({'CA'+c: 'Q' for c in ['G', 'A']})
codon_dict.update({'CA'+c: 'H' for c in ['C', 'T']})
# A
codon_dict.update({'AC'+c: 'T' for c in all})
codon_dict.update({'AT'+c: 'I' for c in ['A', 'C', 'T']})
codon_dict.update({'ATG': 'M'})
codon_dict.update({'AA'+c: 'K' for c in ['G', 'A']})
codon_dict.update({'AA'+c: 'N' for c in ['C', 'T']})
codon_dict.update({'AG'+c: 'R' for c in ['G', 'A']})
codon_dict.update({'AG'+c: 'S' for c in ['C', 'T']})
# T
codon_dict.update({'TC'+c: 'S' for c in all})
codon_dict.update({'TT'+c: 'L' for c in ['G', 'A']})
codon_dict.update({'TT'+c: 'F' for c in ['C', 'T']})
codon_dict.update({'TA'+c: 'Y' for c in ['C', 'T']})
codon_dict.update({'TA'+c: 'STOP' for c in ['G', 'A']})
codon_dict.update({'TG'+c: 'C' for c in ['C', 'T']})
codon_dict.update({'TGA': 'STOP'})
codon_dict.update({'TGG': 'W'})
def triplet_to_aa(t):
if len(t) != 3:
return None
return codon_dict.get(t)
def validate(genome):
if len(re.sub("[^TAGC]+", '', genome)) < len(genome):
raise TypeError
def get_next(genome, start_index):
if start_index + 3 < len(genome):
return (genome[start_index:start_index+3], start_index+3)
elif start_index + 3 == len(genome):
return (genome[start_index:start_index+3], 0)
elif start_index + 3 > len(genome) and start_index + 3 < len(genome) + 3:
res = genome[start_index:len(genome)]
next_index = start_index - len(genome) + 3
res = res + genome[0:next_index]
return (res, next_index)
else:
raise RuntimeError
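# get_next treats the genome as circular. Worked example (assuming genome "ATGC"):
# get_next("ATGC", 3) -> ("CAT", 2); the triplet wraps past the origin and the
# returned index points at the next unread base.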
def read(genome, start_index, reversed):
validate(genome)
current_index = start_index
first_sequence_index = None
reading_sequence = False
done = False
first_stop = None
sequences = dict()
aa_sequence = ""
while not done:
triplet, next_index = get_next(genome, current_index)
if not reading_sequence and triplet == start:
first_sequence_index = current_index
reading_sequence = True
if reading_sequence and triplet in stop:
reading_sequence = False
if first_stop is None:
first_stop = current_index
else:
if current_index == first_stop:
done = True
if len(aa_sequence) > 33:
from_index = first_sequence_index
to_index = next_index - 1 if next_index > 0 else len(genome)-1
if reversed:
from_index = len(genome) - from_index - 1
to_index = len(genome) - to_index - 1
new = (from_index, to_index, aa_sequence, reversed)
old = sequences.get(to_index)
if old is None:
sequences[to_index] = new
else:
_, _, seq, _ = sequences[to_index]
if len(seq) > len(aa_sequence):
sequences[to_index] = new
aa_sequence = ""
if reading_sequence:
aa_sequence += (triplet_to_aa(triplet))
current_index = next_index
return sequences
def get_orfs(genome):
initDict()
l = []
res = read(genome, 0, False)
for last_index, orf in read(genome, 1, False).items():
if res.get(last_index) is not None:
_, _, old_seq, _ = res.get(last_index)
_, _, new_seq, _ = orf
if len(new_seq) > len(old_seq):
res[last_index] = orf
for last_index, orf in read(genome, 2, False).items():
if res.get(last_index) is not None:
_, _, old_seq, _ = res.get(last_index)
_, _, new_seq, _ = orf
if len(new_seq) > len(old_seq):
res[last_index] = orf
l = list(res.values())
res = read(complementary(genome)[::-1], 0, True)
for last_index, orf in read(complementary(genome)[::-1], 1, True).items():
if res.get(last_index) is not None:
_, _, old_seq, _ = res.get(last_index)
_, _, new_seq, _ = orf
if len(new_seq) > len(old_seq):
res[last_index] = orf
for last_index, orf in read(complementary(genome)[::-1], 2, True).items():
if res.get(last_index) is not None:
_, _, old_seq, _ = res.get(last_index)
_, _, new_seq, _ = orf
if len(new_seq) > len(old_seq):
res[last_index] = orf
l += list(res.values())
return l<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
codon_table = {
# Alanine (A)
"GCT": "A",
"GCC": "A",
"GCA": "A",
"GCG": "A",
# Arginine (R)
"CGT": "R",
"CGC": "R",
"CGA": "R",
"CGG": "R",
"AGA": "R",
"AGG": "R",
# Asparagine (N)
"AAT": "N",
"AAC": "N",
# Aspartic acid (D)
"GAT": "D",
"GAC": "D",
# Cysteine (C)
"TGT": "C",
"TGC": "C",
# Glutamine (Q)
"CAA": "Q",
"CAG": "Q",
# Glutamic acid (E)
"GAA": "E",
"GAG": "E",
# Glycine (G)
"GGT": "G",
"GGC": "G",
"GGA": "G",
"GGG": "G",
# Histidine (H)
"CAT": "H",
"CAC": "H",
# Isoleucine (I)
"ATT": "I",
"ATC": "I",
"ATA": "I",
# Methionine (M)
"ATG": "M",
# Leucine (L)
"TTA": "L",
"TTG": "L",
"CTT": "L",
"CTC": "L",
"CTA": "L",
"CTG": "L",
# Lysine (K)
"AAA": "K",
"AAG": "K",
# Phenylalanine (F)
"TTT": "F",
"TTC": "F",
# Proline (P)
"CCT": "P",
"CCC": "P",
"CCA": "P",
"CCG": "P",
# Serine (S)
"TCT": "S",
"TCC": "S",
"TCA": "S",
"TCG": "S",
"AGT": "S",
"AGC": "S",
# Threonine (T)
"ACT": "T",
"ACC": "T",
"ACA": "T",
"ACG": "T",
# Tryptophan (W)
"TGG": "W",
# Tyrosine (Y)
"TAT": "Y",
"TAC": "Y",
# Valine (V)
"GTT": "V",
"GTC": "V",
"GTA": "V",
"GTG": "V",
# STOP
"TAA": "STOP",
"TGA": "STOP",
"TAG": "STOP"
}
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_table[c] for c in codons)
return aa_seq
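# Illustrative example: codons_to_aa("ATGGCTTAA") -> "MASTOP" (Met, Ala, stop marker);
# codons_to_aa("ATGA") -> None because the length is not a multiple of three.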
def get_orfs(genome):
for n in genome:
if n not in "ATGC":
raise TypeError
return []<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
    def get_counts(self):
        return len(self.__sequences)
    def get_average_length(self):
        if self.get_counts() == 0:
            return 0
        return sum(len(seq) for seq in self.__sequences) / self.get_counts()
    def read_fasta(self, filename):
        with open(filename, "r") as f:
            seq = ""
            sequence_started = False
            for line in f:
                if line.startswith(">") or line.startswith(";"):
                    if sequence_started:
                        self.__sequences.append(seq.replace("*", ""))
                        seq = ""
                        sequence_started = False
                    continue
                sequence_started = True
                seq += line.strip()
            if sequence_started:
                self.__sequences.append(seq.replace("*", ""))
    def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
        frequencies = {}
        for seq in self.__sequences:
            for aa in seq:
                frequencies[aa] = frequencies.get(aa, 0) + 1
        return frequencies
    def get_av_frequencies(self):
        # return number of occurrences normalized by length
        total_length = sum(len(seq) for seq in self.__sequences)
        return {aa: count / total_length
                for aa, count in self.get_abs_frequencies().items()}
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        # instance attribute (a class-level list would be shared between objects)
        self.parsed_fasta = []
        self.__sequences = []
        self.read_fasta(filepath)
def get_counts(self):
if (len(self.parsed_fasta) > 0 ):
return len(self.parsed_fasta)
else:
return -1
def get_average_length(self):
avg = 0
for seq in self.parsed_fasta:
# print ((len(seq)))
avg = avg + len(seq)
avg = float(avg) / float(self.get_counts())
print (avg)
return avg
#####################
## Exercise 1.4
#####################
#def read_fasta(self, path):
def read_fasta(self, filename):
with open(filename, "r") as f:
fasta_parse_temp = []
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
fasta_parse_temp.append(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
if "*" in seq:
seq = seq[0:len(seq)-1]
fasta_parse_temp.append(seq)
self.parsed_fasta = fasta_parse_temp
lent = len(self.parsed_fasta)
print(lent)
print (fasta_parse_temp)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
avg = 0
list_of_c = []
list_of_letters = []
list_of_freq = []
list_of_abs_freq = {}
for seq in self.parsed_fasta:
# print ((len(seq)))
s = list(seq)
c = Counter(s)
            list_of_c.append(c)
for ce in list_of_c:
# print(ce)
for c in ce:
# print (c[0])
# print (c, ce[c])
if c in list_of_letters:
continue
else:
list_of_letters.append(c)
list_of_letters.sort()
for p in list_of_letters:
freq = 0
for counter in list_of_c:
for count in counter:
if count == p:
freq = freq + counter[count]
list_of_abs_freq[p] = freq
return list_of_abs_freq
# for x in range (0, len(list_of_c)):
# print(list_of_c[x])
# for y in range (0, len(list_of_letters)):
# print(list_of_c[x][list_of_letters[y]])
# pass
# for letter in list_of_letters:
# for ce in list_of_c:
# freq = 0
# for c in ce:
# if letter == c:
# freq = freq + ce[c]
# list_of_freq.append(ce[c])
# for x in range(0, len(list_of_letters)):
# list_of_abs_freq[x] = list_of_freq[x]
# for x in list_of_letters:
# list_of_abs_freq[c] = list_of_freq[x]
# print (list_of_abs_freq)
# # for count in list_of_c:
# # for key, val in count.items():
# # print (key, val)
# # pass
def get_av_frequencies(self):
        # return number of occurrences normalized by length
abs_freq = self.get_abs_frequencies()
avs_freq = {}
total = 0
for seq in self.parsed_fasta:
# print ((len(seq)))
s = list(seq)
c = Counter(s)
total = total + sum(c.values())
print (total)
for key, value in abs_freq.items():
value = value / float(total)
avs_freq[key] = value
return avs_freq
#####################
## Exercise 1.3
#####################
def aa_dist(aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
# def add_sequence(seq):
# # Clear the parsed FASTA list
# if len(fasta_parse) == 2:
# fasta_parse.clear()
# if seq.startswith(">") or seq.startswith(";"):
# seq = fasta_parse.append(seq)
# else:
# fasta_parse.append(seq)
# if len(fasta_parse)==2:
# print ("fasta_parse")
# return fasta_parse<file_sep>charged = ['R', 'H', 'K', 'D', 'E']
pos_charged = ['R', 'H', 'K']
neg_charged = ['D', 'E']
hydrophobic = ['A', 'V', 'I', 'L', 'M', 'F', 'Y', 'W']
aromatic = ['F', 'W', 'Y', 'H']
polar = ['R', 'N', 'D', 'E', 'Q', 'H', 'K', 'S', 'T', 'Y']
contains_sulfur = ['M', 'C']
acid = ['D', 'E']
basic = ['R', 'H', 'K']
def isCharged(aa):
    return aa in charged
def isPositivelyCharged(aa):
    return aa in pos_charged
def isNegativelyCharged(aa):
    return aa in neg_charged
def isHydrophobic(aa):
    return aa in hydrophobic
def isAromatic(aa):
    return aa in aromatic
def isPolar(aa):
    return aa in polar
def isProline(aa):
    return aa == 'P'
def containsSulfur(aa):
    return aa in contains_sulfur
def isAcid(aa):
    return aa in acid
def isBasic(aa):
    return aa in basic<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import re
from helper import codons_to_aa
from helper import complementary
def get_orfs(genome):
if genome == '' or not re.match(r'^([ATGC]+)$', genome):
raise TypeError('Type Error')
orfs = []
    # key:   (last DNA index of stop codon, isReverseComplement)
    # value: (first DNA index of start codon, translated AA sequence)
    orfs_dict = {}
lenGenome = len(genome)
genome = genome+genome
for isReverseComplement in [False, True]:
if isReverseComplement:
genome = genome[::-1]
genome = complementary(genome)
for start_index in range(0,3):
encoding = False
aaStartIndex = 0
aaFinishIndex = 0
aa = codons_to_aa(genome[start_index:])
for i in range(0,len(aa)):
if aa[i] == 'M' and not encoding:
aaStartIndex = i
encoding = True
if aa[i] == '.' and encoding:
aaFinishIndex = i
encoding = False
firstDNAIndex = (start_index + (aaStartIndex*3))%(lenGenome)
lastDNAIndex = (start_index + (aaFinishIndex*3)+2)%(lenGenome)
if isReverseComplement:
firstDNAIndex = (lenGenome-1-firstDNAIndex)%lenGenome
lastDNAIndex = (lenGenome-1-lastDNAIndex)%lenGenome
if (not (lastDNAIndex,isReverseComplement) in orfs_dict) or ((lastDNAIndex,isReverseComplement) in orfs_dict and len(orfs_dict[(lastDNAIndex,isReverseComplement)][1]) < len(aa[aaStartIndex:aaFinishIndex])):
orfs_dict[(lastDNAIndex,isReverseComplement)] = (firstDNAIndex, aa[aaStartIndex:aaFinishIndex])
for tupleKey in orfs_dict:
if len(orfs_dict[tupleKey][1]) > 33:
orfs.append((orfs_dict[tupleKey][0],tupleKey[0],orfs_dict[tupleKey][1],tupleKey[1]))
return orfs
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome):
valid = 'ACTG'
for letter in genome:
if letter not in valid:
raise TypeError
<file_sep>import numpy as np
from numpy import unravel_index
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.maximum_score = 0
self.string_index_1 = list()
self.string_index_2 = list()
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
length_1 = len(self.string1)
length_2 = len(self.string2)
for column in range(length_1+1):
self.score_matrix[0][column] = 0
for row in range(length_2+1):
self.score_matrix[row][0] = 0
for i in range(length_2+1):
for j in range(length_1+1):
if i != 0 and j != 0:
self.score_matrix[i][j] = max(0, self.score_matrix[i - 1][j - 1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]],\
self.score_matrix[i - 1][j] + self.gap_penalty, \
self.score_matrix[i][j - 1] + self.gap_penalty)
                    if self.score_matrix[i][j] > self.maximum_score:
self.maximum_score = self.score_matrix[i][j]
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
i, j = self.get_alignment()
if i == "" and j == "":
return False
else:
return True
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
(row, column) = unravel_index(self.score_matrix.argmax(), self.score_matrix.shape)
first_alignment = ""
second_alignment = ""
while row > 0 or column > 0:
condition = self.score_matrix[row][column] == self.score_matrix[row - 1][column - 1] + \
self.substitution_matrix[self.string2[row - 1]][self.string1[column - 1]]
if row > 0 and column > 0 and condition:
first_alignment = self.string2[row-1] + first_alignment
second_alignment = self.string1[column-1] + second_alignment
self.string_index_1.append(column-1)
self.string_index_2.append(row-1)
row -= 1
column -= 1
elif row > 0 and self.score_matrix[row][column] == self.score_matrix[row-1][column] + self.gap_penalty:
first_alignment = self.string2[row-1] + first_alignment
second_alignment = "-" + second_alignment
row -= 1
else:
first_alignment = "-" + first_alignment
second_alignment = self.string1[column-1] + second_alignment
column -= 1
        # strip leading gap columns left over from tracing back to the matrix border
        while first_alignment.startswith("-") or second_alignment.startswith("-"):
            first_alignment = first_alignment[1:]
            second_alignment = second_alignment[1:]
        return (first_alignment, second_alignment)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
self.get_alignment()
if string_number == 1:
index = self.string_index_1
else:
index = self.string_index_2
if residue_index in index:
return True
return False
<file_sep>from collections import Counter
##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.sequences = []
self.head = []
self.read_fasta(filepath)
self.length1 = 0
def get_counts(self):
return len(self.sequences)
def get_average_length(self):
sum1 = 0
for i in self.sequences:
sum1 += (len(str(i)))
self.length1=sum1
return sum1/(len(self.sequences))
def read_fasta(self, path):
file = open(path, "r")
current_sequence = []
for line in file:
print(line)
line = line.strip()
if not line:
continue
if line.startswith(">"):
if len(current_sequence) > 0:
self.sequences.append(''.join(current_sequence))
active_sequence_name = line[1:]
self.head.append(active_sequence_name)
current_sequence = []
continue
sequence = line.split("*")[0]
current_sequence.append(sequence)
# Flush the last current_sequence block to the test list
if len(current_sequence) > 0:
            self.sequences.append(''.join(current_sequence))
def get_abs_frequencies(self):
test1 = dict(Counter("".join(self.sequences)))
return test1
def get_av_frequencies(self):
sum1 = 0
for i in self.sequences:
sum1 += (len(str(i)))
test_cp = dict(Counter("".join(self.sequences)))
from pprint import pprint
pprint(sum1)
test_cp.update( (x , y/sum1) for x , y in test_cp.items())
return test_cp
<file_sep>##############
# Exercise 2.7
##############
def return_helper(aa, group):
    return aa in group
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return return_helper(aa, "RHK")
def isNegativelyCharged(aa):
    return return_helper(aa, "DE")
def isHydrophobic(aa):
    return return_helper(aa, "AVILMFYW")
def isAromatic(aa):
    return return_helper(aa, "FWYH")
def isPolar(aa):
    return return_helper(aa, "NQYSTRKHDE")
def isProline(aa):
    return return_helper(aa, "P")
def containsSulfur(aa):
    return return_helper(aa, "CM")
def isAcid(aa):
    return return_helper(aa, "ED")
def isBasic(aa):
    return return_helper(aa, "RHK")
<file_sep>import numpy as np
import copy
def get_chars_indices_string(string):
lens = len(string)
s = []
for i in range(lens):
s.insert(i, (i, string[i]))
return s
def traceback(i, j, string1,string2,substitution_matrix,gap_penalty,score_matrix,al1,al2):
while i>=1 and j>=1:
let2 = string2[i - 1]
let1 = string1[j - 1]
diag = score_matrix[i - 1][j - 1] + substitution_matrix[let2][let1]
ver = score_matrix[i - 1][j] + gap_penalty
hor = score_matrix[i][j - 1] + gap_penalty
maxv = max(diag, ver, hor)
occnum = [diag, ver, hor].count(maxv)
if occnum == 1:
if maxv == diag:
al1.append(let1)
al2.append(let2)
i -= 1
j -= 1
elif maxv == ver:
i -= 1
al2.append(let2)
al1.append('-')
elif maxv == hor:
al2.append('-')
al1.append(let1)
j -= 1
            else:
                # more than one direction is optimal: branch the traceback and
                # collect the alignments from every optimal predecessor
                alignments = []
                if diag == maxv:
                    branch1 = copy.deepcopy(al1)
                    branch2 = copy.deepcopy(al2)
                    branch1.append(let1)
                    branch2.append(let2)
                    alignments.extend(traceback(i - 1, j - 1, string1, string2,
                                                substitution_matrix, gap_penalty,
                                                score_matrix, branch1, branch2))
                if ver == maxv:
                    branch1 = copy.deepcopy(al1)
                    branch2 = copy.deepcopy(al2)
                    branch1.append('-')
                    branch2.append(let2)
                    alignments.extend(traceback(i - 1, j, string1, string2,
                                                substitution_matrix, gap_penalty,
                                                score_matrix, branch1, branch2))
                if hor == maxv:
                    branch1 = copy.deepcopy(al1)
                    branch2 = copy.deepcopy(al2)
                    branch1.append(let1)
                    branch2.append('-')
                    alignments.extend(traceback(i, j - 1, string1, string2,
                                                substitution_matrix, gap_penalty,
                                                score_matrix, branch1, branch2))
                return alignments
    # consume any remaining prefix once one string is exhausted (border gaps)
    while i >= 1:
        al1.append('-')
        al2.append(string2[i - 1])
        i -= 1
    while j >= 1:
        al1.append(string1[j - 1])
        al2.append('-')
        j -= 1
    return [(al1, al2)]
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# scoring matrix skeleton
mat = []
l1 = list(range(0, (len(self.string1) + 1) * self.gap_penalty, self.gap_penalty))
for i in range(1, len(self.string2) + 1):
l = [i * self.gap_penalty] + [None] * (len(self.string1))
mat.append(l)
mat.insert(0, l1)
# scoring matrix filling
for i in range(1, len(mat)):
for j in range(1, len(mat[0])):
                diag = mat[i - 1][j - 1] + self.substitution_matrix[self.string2[i - 1]][self.string1[j - 1]]
ver = mat[i - 1][j] + self.gap_penalty
hor = mat[i][j - 1] + self.gap_penalty
maxv = max(diag, ver, hor)
mat[i][j] = maxv
self.score_matrix = mat
#alignments = [[strings[longestindex], []]]
i = len(self.score_matrix)-1
j = len(self.score_matrix[0])-1
al1=[]
al2 = []
        alignments = traceback(i, j, self.string1, self.string2, self.substitution_matrix,
                               self.gap_penalty, self.score_matrix, al1, al2)
'''mat = self.score_matrix
i = len(mat) - 1
j = len(mat[0]) - 1
indstring = len(strings[shortestindex]) - 1
numal = 0
while indstring >= 0:
diag = mat[i][j]
if i == len(mat):
ver = None
else:
ver = mat[i - 1][j]
if j == len(mat[0]):
hor = None
else:
hor = mat[i][j - 1]
let = strings[shortestindex][indstring]
maxv = max(diag, ver, hor)
if diag > ver and diag > hor:
alignments[numal][1].append(let)
i -= 1
j -= 1
indstring -= 1
if diag != maxv:
alignments[numal][1].append('-')
if ver > hor:
i -= 1
if ver < hor:
j -= 1
if ver == hor:
cacca = ""
if diag == maxv and (hor == maxv or ver == maxv):
alignments.append([strings[longestindex], alignments[0][1]])
traceback(i,j,indstring,self.score_matrix,strings[shortestindex],alignments[0][1])'''
alignmentstup = []
for tup in alignments:
tup[0].reverse()
tup[1].reverse()
s1="".join(tup[0])
s2 = "".join(tup[1])
alignmentstup.append((s1,s2))
self.alignments=alignmentstup
'''def get_possible_alignments(self):
len1=len(self.string1)
len2 = len(self.string2)
s1 = get_chars_indices_string(self.string1)
s2 = get_chars_indices_string(self.string2)
different_elems = set(self.string1) ^ set(self.string2)
different_elems_pos = set(s1) ^ set(s2)
to_add1=max(0,len2-len1)
to_add2 = max(0, len1 - len2)
string1=self.string1
for i in range(to_add1):
string1+='-'
string2 = self.string2
for i in range(to_add1):
string2 += '-'
als=[]
# assuming self.string1 is longer
al=""
if to_add1!=to_add2:
if to_add1==0:
als.append([self.string1, self.string2])
for i in range(len(als[0][0])):
if als[0][0][i] == als[0][1][i]:
al += string1[i]
else:
if i in different_elems_pos:
cacca=""
al += '-'
ls = list(als[0][1])
ls.insert(i, '-')
als[0][1] = ''.join(ls)
else:
als.append([self.string2, self.string1])
for i in range(len(als[0][0])):
if als[0][0][i] == als[0][1][i]:
al += string1[i]
else:
al += '-'
ls = list(als[0][1])
ls.insert(i, '-')
als[0][1] = ''.join(ls)
pass'''
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.score_matrix)-1][len(self.score_matrix[0])-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
'''als=[]
all=pairwise2.align.globalxx(self.string1, self.string2)
alln=[]
longestindex=0
if len(self.string2)>len(self.string1): longestindex=1
for al in all:
if '-' not in al[longestindex]:
alln.append(al)
for al in alln:
als.append((al[0],al[1]))
return als
#return [('ADMI-NS', 'ADMIRES'), ('ADMIN-S', 'ADMIRES')]'''
#Attention! string1 is used to index columns, string2 is used to index rows
'''alignments=[]
indmati = len(self.score_matrix) - 1
indmatj = len(self.score_matrix[0]) - 1
indmat = [indmati, indmatj]
strings=[self.string2,self.string1]
if len(self.string1)==len(self.string2):
return [(self.string1,self.string2)]
if len(self.string1)>len(self.string2):
longestindex=1
shortestindex=0
else:
longestindex=0
shortestindex =1
chardiff=abs(len(strings[0])-len(strings[1]))
gaps=[]
i=len(strings[longestindex])-1
while indmat[shortestindex]> 0 and indmat[longestindex]>0:
a=strings[0][indmat[0]-1]
b=strings[1][indmat[1]-1]
if self.string2[indmat[0]-1]==self.string1[indmat[1]-1]:
indmat[0]-=1
indmat[1] -= 1
else:
if strings[shortestindex][indmat[shortestindex]-1] in strings[longestindex]:
gaps.append(indmat[longestindex] - 1)
indmat[longestindex] -= 1
i-=1
else:
indmat[shortestindex] -= 1
for g in gaps:
strings[shortestindex]=strings[shortestindex][:g] + '-' + strings[shortestindex][g:]
alignments.append((strings[1],strings[0]))
return alignments'''
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
Attention! string1 is used to index columns, string2 is used to index rows
"""
return self.score_matrix
'''def get_score(self, alignment):
al1=alignment[0]
al2 = alignment[1]
lenal=len(al1)
i=lenal-1
j = lenal - 1
if '-' in al1:
withoutgap=1
withgap = 0
if '-' in al2:
withgap = 1
withoutgap = 0
score=0
indmati = len(self.score_matrix) - 1
indmatj = len(self.score_matrix[0]) - 1
indmat = [indmati, indmatj]
indal=[i,j]
while indmat[0]+indmat[1]>=0:
score+=self.score_matrix[indmati][indmatj]
if '-' not in [al1[indal[0]],al2[indal[1]]]:
indal[0] -= 1
indal[1] -= 1
indmat[0]-=1
indmat[1] -= 1
else:
if alignment[withgap][indal[withgap]]=='-':
indmat[withoutgap]-=1
indal[withoutgap]-=1
return score'''
<file_sep>import numpy as np
from contextlib import suppress
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros(
            (len(string2) + 1, len(string1) + 1), dtype=int)
        self.pred_matrix = np.zeros(
            (len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for r in range(len(self.string2) + 1):
for c in range(len(self.string1) + 1):
s1 = s2 = s3 = -999
# diag
with suppress(Exception):
d = self.substitution_matrix[
self.string1[c - 1]
][
self.string2[r - 1]
]
# d = 1 if self.string1[c - 1] == self.string2[r - 1] else 0
assert r - 1 >= 0
assert c - 1 >= 0
s1 = d + self.score_matrix[r - 1][c - 1]
# top
with suppress(Exception):
d = self.gap_penalty
assert r - 1 >= 0
s2 = d + self.score_matrix[r - 1][c]
# right
with suppress(Exception):
d = self.gap_penalty
assert c - 1 >= 0
s3 = d + self.score_matrix[r][c - 1]
s = max(s1, s2, s3)
self.score_matrix[r][c] = s if s > -999 else 0
self.pred_matrix[r][c] += 1 if s == s1 else 0
self.pred_matrix[r][c] += 2 if s == s2 else 0
self.pred_matrix[r][c] += 4 if s == s3 else 0
def rec_best_score(self, x, y):
s = [0]
if x + y == 0:
return 1
d = self.pred_matrix[x][y]
if x == 0:
d = 4
if y == 0:
d = 2
if d & 1:
s.append(self.rec_best_score(x-1, y-1))
if d & 2:
s.append(self.rec_best_score(x-1, y))
if d & 4:
s.append(self.rec_best_score(x, y-1))
return sum(s)
"""
x A B C
x 0 1
A 2
C 3
B 4 5
ABC A--BC
ACB -ACB-
go up -> gap in first
go left -> gap in second
"""
def rec_alignments(self, r, c, prev1="", prev2=""):
s = []
if r + c == 0: # top left
return [(prev1, prev2)]
d = self.pred_matrix[r][c]
if r == 0:
d = 4
if c == 0:
d = 2
if d & 1: # diag
c1 = self.string1[c - 1]
c2 = self.string2[r - 1]
next1 = c1 + prev1
next2 = c2 + prev2
for i in self.rec_alignments(r-1, c-1, next1, next2):
s.append(i)
if d & 2: # top
c1 = '-'
c2 = self.string2[r - 1]
next1 = c1 + prev1
next2 = c2 + prev2
for i in self.rec_alignments(r-1, c, next1, next2):
s.append(i)
if d & 4: # left
c1 = self.string1[c - 1]
c2 = '-'
next1 = c1 + prev1
next2 = c2 + prev2
for i in self.rec_alignments(r, c-1, next1, next2):
s.append(i)
return s
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1][-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return self.rec_best_score(len(self.string2), len(self.string1))
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.rec_alignments(len(self.string2), len(self.string1))
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
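
# A minimal usage sketch (the identity-style scoring dict below is an
# assumption for illustration; the exercise supplies a full substitution
# matrix). Under identity scoring, 'ACT' vs 'AGT' has exactly one optimal
# alignment with score 2 (two matches, one mismatch).
if __name__ == '__main__':
    alphabet = 'ACGT'
    identity = {a: {b: 1 if a == b else 0 for b in alphabet} for a in alphabet}
    ga = GlobalAlignment('ACT', 'AGT', -1, identity)
    print(ga.get_best_score())             # 2
    print(ga.get_number_of_alignments())   # 1
    print(ga.get_alignments())             # [('ACT', 'AGT')]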
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed in NumPy 1.24
self.alignments = []
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.compute_score_matrix()
self.compute_alignments()
def compute_score_matrix(self):
"""
Initialize the score matrix.
Fill the first row and the first column with gap penalties,
then calculate the top score for each empty cell starting
from top left.
"""
for column in range(len(self.string1) + 1):
self.score_matrix[0, column] = self.gap_penalty * column
for row in range(len(self.string2) + 1):
self.score_matrix[row, 0] = self.gap_penalty * row
for row in range(1, len(self.string2) + 1):
for column in range(1, len(self.string1) + 1):
match = (
self.score_matrix[row - 1, column - 1]
+ self.substitution_matrix[self.string1[column - 1]][self.string2[row - 1]]
)
string1_insertion = (
self.score_matrix[row - 1, column] + self.gap_penalty
)
string2_insertion = (
self.score_matrix[row, column - 1] + self.gap_penalty
)
self.score_matrix[row, column] = max(
match, string1_insertion, string2_insertion
)
def compute_alignments(self):
string1_aligned = []
string2_aligned = []
row = len(self.string2)
column = len(self.string1)
self.alignments = self.process_cell(row,
column,
string1_aligned,
string2_aligned)
def process_cell(self, row, column, string1_aligned, string2_aligned):
results = []
if row == 0 and column == 0:
string1_aligned.reverse()
string2_aligned.reverse()
return [(''.join(string1_aligned), ''.join(string2_aligned))]
if row > 0 or column > 0:
if (row > 0 and
column > 0 and
self.score_matrix[row, column] == self.score_matrix[row - 1, column - 1]
+ self.substitution_matrix[self.string1[column - 1]][self.string2[row - 1]]):
results.extend(
self.process_cell(
row - 1,
column - 1,
string1_aligned + [self.string1[column - 1]],
string2_aligned + [self.string2[row - 1]]
)
)
if (row > 0 and
self.score_matrix[row, column] == self.score_matrix[row - 1][column]
+ self.gap_penalty):
# insertion into string1
results.extend(
self.process_cell(
row - 1,
column,
string1_aligned + ['-'],
string2_aligned + [self.string2[row - 1]]
)
)
if (column > 0 and
self.score_matrix[row, column] == self.score_matrix[row][column - 1]
+ self.gap_penalty):
# insertion into string2
results.extend(
self.process_cell(
row,
column - 1,
string1_aligned + [self.string1[column - 1]],
string2_aligned + ['-']
)
)
return results
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score, int
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
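
# Hypothetical helper (not part of the exercise template): when only the number
# of co-optimal alignments is needed, it can be counted directly from the filled
# score matrix instead of enumerating every alignment, avoiding the exponential
# blow-up of compute_alignments. The recurrence mirrors process_cell above.
def count_optimal_paths(ga):
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def count(row, column):
        if row == 0 and column == 0:
            return 1
        total = 0
        if (row > 0 and column > 0 and
                ga.score_matrix[row, column] == ga.score_matrix[row - 1, column - 1]
                + ga.substitution_matrix[ga.string1[column - 1]][ga.string2[row - 1]]):
            total += count(row - 1, column - 1)
        if (row > 0 and
                ga.score_matrix[row, column] == ga.score_matrix[row - 1, column] + ga.gap_penalty):
            total += count(row - 1, column)
        if (column > 0 and
                ga.score_matrix[row, column] == ga.score_matrix[row, column - 1] + ga.gap_penalty):
            total += count(row, column - 1)
        return total

    # e.g. count_optimal_paths(GlobalAlignment('ACT', 'AGT', -1, some_matrix))
    return count(len(ga.string2), len(ga.string1))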
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_sequences(self):
return self.__sequences
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
count = 0
for seq in self.__sequences:
count += len(seq)
return count / self.get_counts()
def read_fasta(self, path):
with open (path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.add_sequence(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.add_sequence(seq)
def add_sequence(self, seq):
if seq.endswith('*'):
seq_pruned = seq[:-1]
self.__sequences.append(seq_pruned)
else:
self.__sequences.append(seq)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
counts = Counter()
for seq in self.__sequences:
counts.update(Counter(seq))
return counts
def get_av_frequencies(self):
        # return number of occurrences normalized by length
total_acids = self.get_counts() * self.get_average_length()
counts = self.get_abs_frequencies()
for key in counts:
counts[key] /= total_acids
return counts
if __name__ == '__main__':
dist = AADist('tests/tests.fasta')
print(dist.get_sequences())
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
# Tip: This module might be useful for parsing...
from Bio.PDB.Polypeptide import PPBuilder
from Bio.PDB.MMCIFParser import MMCIFParser
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several
# models.
class PDB_Parser:
def __init__(self, path):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
parser = MMCIFParser()
self.structure = parser.get_structure("7ahl", path)
# 3.8 Chains
def get_number_of_chains(self):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object
variable
Return:
Number of chains in this structure as integer.
'''
return len(list(self.structure.get_chains()))
# 3.9 Sequence
def get_sequence(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object
variable
chain_id: String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain
(chain_id) in a Biopython.PDB structure as a string.
'''
return PPBuilder().build_peptides(self.structure[0][chain_id])[0].get_sequence()
# 3.10 Water molecules
def get_number_of_water_molecules(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object
variable
chain_id: String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id) in a
Biopython.PDB structure as an integer.
'''
return len([residue.get_id()[0] for residue in self.structure[0][chain_id] if residue.get_id()[0] == 'W'])
# 3.11 C-Alpha distance
def get_ca_distance(self, chain_id_1, index_1, chain_id_2, index_2):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an
object variable
chain_id_1: String (usually in ['A','B', 'C' ...]. The number of chains
epends on the specific protein and the resulting structure)
index_1: index of a residue in a given chain in a Biopython.PDB
structure
chain_id_2: String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2: index of a residue in a given chain in a Biopython.PDB
structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via
int().
The reason for using two different chains as an input is that also the
distance between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different
proteins (Heterodimers) or between different copies of the same protein
(Homodimers).
'''
residue_1 = self.structure[0][chain_id_1][index_1]
residue_2 = self.structure[0][chain_id_2][index_2]
return int(residue_1["CA"] - residue_2["CA"])
# 3.12 Contact Map
def get_contact_map(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object
variable
chain_id: String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet) for a
given chain in a Biopython.PDB structure as numpy array. The values in the
matrix describe the c-alpha distance between all residues in a chain of a
Biopython.PDB structure. Only integer values of the distance have to be
given (see below).
'''
ca_list = PPBuilder().build_peptides(
self.structure[0][chain_id])[0].get_ca_list()
chain_len = len(ca_list)
contact_map = np.zeros((chain_len, chain_len), dtype=np.float32)
for i in range(chain_len):
for j in range(0, i):
contact_map[i][j] = ca_list[i] - ca_list[j]
contact_map[j][i] = contact_map[i][j]
return contact_map.astype(np.int64) # return rounded (integer) values
def get_avg_residue_bfactor(self, residue):
count = 0
b_factor = 0.0
for atom in residue:
atom_b_factor = atom.get_bfactor()
if atom_b_factor is not None:
b_factor += atom.get_bfactor()
count += 1
else:
return np.nan
return b_factor / count
# 3.13 B-Factors
def get_bfactors(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object
variable
chain_id: String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB
structure. The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a
residue. Calculate the mean B-Factor for a residue by averaging over the
B-Factor of all atoms in a residue. Sometimes B-Factors are not available
for a certain residue; (e.g. the residue was not resolved); insert np.nan
for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit
variance). You have to use np.nanmean, np.nanvar etc. if you have nan
values in your array. The returned data structure has to be a numpy array
rounded again to integer.
'''
chain = [res for res in self.structure[0]
[chain_id] if res.get_id()[0] != 'W']
chain_len = len(chain)
b_factors = np.zeros(chain_len, dtype=np.float32)
index = 0
for residue in chain:
b_factors[index] = self.get_avg_residue_bfactor(residue)
index += 1
normed_b_factors = (b_factors - np.nanmean(b_factors, axis=0)
) / np.nanstd(b_factors, axis=0)
return normed_b_factors.astype(np.int64)
def main():
print('PDB parser class.')
pdb = PDB_Parser("tests/7ahl.cif")
print(pdb.get_ca_distance("A", 410, "E", 120))
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.5
##############
from collections import deque
import re
import os
codonTable = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',
}
startCodon = set(['ATG'])
stopCodon = set(['TAA', 'TAG', 'TGA'])
codonLength = 3
minLength = 34
def find_orf(sequence, primary, frame):
    sequence = sequence.upper()
    L = len(sequence)
    protein = ""  # translated codons, one letter each, for this reading frame
    startIndex = deque()
    orfs = []
    for i in range(frame-1, L, codonLength):
        codon = sequence[i:i+codonLength]
        if len(codon) != codonLength:
            continue
        protein += codonTable[codon]
        if codon in startCodon:
            startIndex.append(i)
        if codon in stopCodon and len(startIndex) > 0:
            index = startIndex.popleft()
            # protein positions are relative to the frame offset
            zorf = protein[(index-(frame-1))//codonLength:(i-(frame-1))//codonLength]
            if len(zorf) <= minLength:
                continue
            stopIndex = i+codonLength-1
            if primary:
                orfs.append((index, stopIndex, zorf))
            else:
                orfs.append((L-index-1, L-stopIndex-1, zorf))
            if len(startIndex) > 0:
                startIndex = deque([x for x in startIndex if x > stopIndex])
    return orfs
def complement(seq):
    # base-pair complement of a DNA strand; used to build the reverse strand
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs[base] for base in seq)

def get_orfs(seq):
    seq = seq.upper()
    orf_list = []
    # Checking DNA sequence
    if re.match('^[ACGT]+$', seq) is None:
        raise TypeError("It is not a DNA!")
    # Primary strand, reading frames 1-3
    for i in range(1, 4):
        orf_list.extend(find_orf(seq, True, i))
    # Reverse strand, reading frames 1-3
    reverse_seq = complement(seq[::-1])
    for i in range(1, 4):
        orf_list.extend(find_orf(reverse_seq, False, i))
    return orf_list
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
relative_path = os.path.dirname(__file__)
genome = read_genome(relative_path + '/genome.txt')
genome = genome.upper()
genome_length = len(genome)
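
# A minimal smoke test on a made-up sequence (illustrative only; the exercise
# input is genome.txt, read above). The toy ORF below spans 41 residues on the
# primary strand, long enough to clear minLength.
if __name__ == '__main__':
    toy = 'ATG' + 'GCT' * 40 + 'TAA'
    print(get_orfs(toy))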
<file_sep>##############
# Exercise 2.6
##############
import os
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.filepath = filepath
self.read_fasta(filepath)
def get_counts(self):
counter = 0
with open(""+self.filepath, "r") as f:
for line in f:
if line.startswith(">"):
counter+=1
#print(counter)
return counter
def get_average_length(self):
counter = 0
with open(""+self.filepath, "r") as f:
for line in f:
if line.startswith(">") or line.startswith(";") or line.startswith(" "):
continue
line_no_spaces=line.strip()
counter += len(line_no_spaces.strip("*"))
n = self.get_counts()
return (counter/n)
def read_fasta(self, path):
with open(path, "r") as f:
seq=""
for line in f:
if line.startswith(">") or line.startswith(";") or line.startswith(" "):
continue
line_no_spaces=line.strip()
seq += line_no_spaces.strip("*")
return seq
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
seq = self.read_fasta(self.filepath)
counted_aa = Counter(seq)
#print(counted_aa)
return counted_aa
    def get_av_frequencies(self):
        # return number of occurrences normalized by length
        freqs = self.get_abs_frequencies()
        seq = self.read_fasta(self.filepath)
        for key in freqs:
            freqs[key] /= len(seq)
        return freqs
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = {}
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences.keys())
def get_average_length(self):
count = 0
for key,val in self.__sequences.items():
count += len(val)
return count/len(self.__sequences.keys())
def read_fasta(self, path):
def fasta_helper(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">") or line.startswith(';'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
with open(path) as fp:
for name, seq in fasta_helper(fp):
if seq.endswith("*"):
seq = seq[:-1]
self.__sequences[name] = seq
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
amino_frequencies = {}
for key, val in self.__sequences.items():
countmaintained = Counter(val)
for k in countmaintained:
if k not in amino_frequencies:
amino_frequencies[k] = countmaintained[k]
else:
amino_frequencies[k] = amino_frequencies[k] + countmaintained[k]
return amino_frequencies
def get_av_frequencies(self):
        # return number of occurrences normalized by length
amino_frequencies = {}
for key, val in self.__sequences.items():
countmaintained = Counter(val)
for k in countmaintained:
if k not in amino_frequencies:
amino_frequencies[k] = countmaintained[k]
else:
amino_frequencies[k] = amino_frequencies[k] + countmaintained[k]
total = sum(amino_frequencies.values())
amino_frequencies_norm = {}
for key in amino_frequencies:
amino_frequencies_norm[key] = amino_frequencies[key] / total
return amino_frequencies_norm
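
# A minimal usage sketch (the two-record FASTA below is made up for
# illustration): write a temporary file and check the computed statistics.
if __name__ == '__main__':
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.fasta', delete=False) as tmp:
        tmp.write(">seq1\nMKV\n>seq2\nMKVL*\n")
        path = tmp.name
    dist = AADist(path)
    print(dist.get_counts())          # 2
    print(dist.get_average_length())  # 3.5 (the trailing '*' is stripped)
    os.remove(path)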
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.PDB.Polypeptide import PPBuilder
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser(QUIET=True) # parser object for reading in structure in CIF format
PPB=PPBuilder()
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
self.structure = self.CIF_PARSER.get_structure("cif", path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
return sum(1 for i in self.structure.get_chains())
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
for chain in self.structure.get_chains():
if chain.id == chain_id:
for pp in self.PPB.build_peptides(chain):
return pp.get_sequence()
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
for chain in self.structure.get_chains():
if chain.id == chain_id:
return sum(1 for residue in chain.get_residues() if "HOH" in residue.get_resname())
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
residue_1 = ""
residue_2 = ""
for chain in self.structure.get_chains():
if chain.id == chain_id_1:
residue_1 = chain[index_1]
for chain in self.structure.get_chains():
if chain.id == chain_id_2:
residue_2 = chain[index_2]
return int( abs(residue_1["CA"] - residue_2["CA"]) )
def calc_dist_matrix(self, chain_residue_1, chain_residue_2):
"""Returns a matrix of C-alpha distances between two chains"""
        answer = np.zeros((len(chain_residue_1), len(chain_residue_2)), float)  # np.float was removed in NumPy 1.24
i = 0
for residue_one in chain_residue_1:
j = 0
for residue_two in chain_residue_2:
answer[i, j] = abs(residue_one["CA"] - residue_two["CA"])
j = j + 1
i = i + 1
return answer
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
residues = []
for chain in self.structure.get_chains():
if chain.id == chain_id:
for residue in chain.get_residues():
if "HOH" not in residue.get_resname():
residues.append(residue)
break
contact_map = self.calc_dist_matrix(residues, residues)
        return contact_map.astype(int)  # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
residues = []
residues_bfac = []
for chain in self.structure.get_chains():
if chain.id == chain_id:
for residue in chain.get_residues():
if "HOH" not in residue.get_resname():
residues.append(residue)
break
for residue in residues:
avg_bfac = 0
length = len(list(residue.get_atoms()))
for atom in residue.get_atoms():
if atom.get_bfactor() != None:
avg_bfac += atom.get_bfactor()
if avg_bfac != 0:
avg_bfac /= length
residues_bfac.append(avg_bfac)
b_factors = np.array( residues_bfac, dtype=np.float32 )
b_factors = (b_factors - b_factors.mean(axis=0)) / b_factors.std(axis=0)
        return b_factors.astype(int)  # return rounded (integer) values
def main():
print('PDB parser class.')
obj = PDB_Parser("./tests/7ahl.cif")
obj.get_bfactors("C")
return None
if __name__ == '__main__':
main()<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
pCharged = 'RKH'
if aa.upper() in pCharged:
return True
return False
def isNegativelyCharged(aa):
nCharged = 'ED'
if aa.upper() in nCharged:
return True
return False
def isHydrophobic(aa):
hydro = 'AILMVFWY'
if aa.upper() in hydro:
return True
return False
def isAromatic(aa):
ar = 'FWYH'
if aa.upper() in ar:
return True
return False
def isPolar(aa):
polar = 'HRNDQEKSTY'
if aa.upper() in polar:
return True
return False
def isProline(aa):
if aa.upper() == 'P':
return True
return False
def containsSulfur(aa):
sulfur = 'CM'
if aa.upper() in sulfur:
return True
return False
def isAcid(aa):
acid = 'DE'
if aa.upper() in acid:
return True
return False
def isBasic(aa):
basic = 'RHK'
if aa.upper() in basic:
return True
return False
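
# A minimal usage sketch (the peptide below is made up for illustration).
if __name__ == '__main__':
    peptide = 'MKDEWP'
    print([aa for aa in peptide if isCharged(aa)])      # ['K', 'D', 'E']
    print([aa for aa in peptide if isHydrophobic(aa)])  # ['M', 'W']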
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.word_counter = [] # counts number of words per sequence, same order/indices as self.sequences
self.word_to_seq = {} # key is word, value is list of indices of sequences
self.word_indices = {} # key is word, value is dictionary with key being target, value being list of indices of respective word
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
idx = len(self.sequences)
self.sequences.append(sequence)
current_words = set()
for i in range(len(sequence)-2):
word = sequence[i:i+3]
if not word in self.word_indices.keys():
self.word_indices[word] = {}
if not sequence in self.word_indices[word].keys():
self.word_indices[word][sequence] = []
if not i in self.word_indices[word][sequence]:
self.word_indices[word][sequence].append(i)
if not word in current_words:
if not word in self.word_to_seq.keys():
self.word_to_seq[word] = {idx}
else:
self.word_to_seq[word].add(idx)
current_words.add(word)
self.word_counter.append(len(current_words))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
if not word in self.word_to_seq.keys():
return []
return [self.sequences[i] for i in self.word_to_seq[word]]
def get_sequences_supercharged(self, word):
"""
Return indices of word for all sequences.
:param word: a word (string).
:return: Dictionary, containing sequences as keys and list of indices as values.
"""
return self.word_indices[word]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
word_seq_avg = np.rint(np.mean(self.word_counter))
seq_word_avg = np.rint(np.mean([len(self.word_to_seq[k]) for k in self.word_to_seq.keys()]))
return tuple((len(self.sequences), len(self.word_to_seq.keys()), word_seq_avg, seq_word_avg))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.all_3mers = []
for i in range(len(ALPHABET)):
for j in range(len(ALPHABET)):
for k in range(len(ALPHABET)):
self.all_3mers.append(INT_TO_AA[i] + INT_TO_AA[j] + INT_TO_AA[k])
self.subst_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words = list(self.all_3mers) # copy full list of 3-mers and remove already accepted ones in the loop -> slightly faster than comparing with all words all the time
accepted = []
for i in range(self.get_length(sequence,pssm)-2):
for word in list(words):
score = self.score(sequence, pssm, word, i, 0, 3)
if score >= T:
accepted.append(word)
words.remove(word)
return accepted
def get_words_supercharged(self, *, sequence=None, pssm=None, T=11):
"""
For all sequence positions, return all words with their score if score >=T.
Only a sequence or PSSM will be provided, not both at the same time.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: Dictionary with sequence positions as keys and list of tuples as values, containing (word, score).
"""
result = {}
for i in range(self.get_length(sequence,pssm)-2):
accepted = []
for word in self.all_3mers:
score = self.score(sequence, pssm, word, i, 0, 3)
if score >= T:
accepted.append((word,score))
result[i] = accepted
return result
def get_length(self, query, pssm):
length = 0
if not query is None:
length = len(query)
else:
length = pssm.shape[0]
return length
def score(self, query, pssm, target, q_start, t_start, length):
score = 0
if not query is None:
for i in range(length):
score += self.subst_matrix[AA_TO_INT[target[i+t_start]]][AA_TO_INT[query[i+q_start]]]
else:
for i in range(length):
score += pssm[i+q_start][AA_TO_INT[target[i+t_start]]]
return score
def extend_right(self, query, pssm, target, seq_idx_start, tar_idx_start, offset_last, max_score, X):
hsp = (seq_idx_start, tar_idx_start, offset_last + 1, max_score)
seq_idx_end = seq_idx_start + offset_last
# Right extension
current_score = max_score
seq_idx = seq_idx_end
tar_idx = tar_idx_start + offset_last
while seq_idx + 1 < self.get_length(query, pssm) and tar_idx + 1 < len(target):
current_score += self.score(query, pssm, target, seq_idx + 1, tar_idx + 1, 1)
if current_score > max_score:
max_score = current_score
seq_idx_end = seq_idx + 1
hsp = (seq_idx_start, tar_idx_start, seq_idx_end - seq_idx_start + 1, max_score)
if current_score <= max_score - X:
break
seq_idx += 1
tar_idx += 1
return hsp
def extend_left(self, query, pssm, target, seq_idx_start, tar_idx_start, offset_last, max_score, X):
hsp = (seq_idx_start, tar_idx_start, offset_last + 1, max_score)
seq_idx_end = seq_idx_start + offset_last
# Left extension
current_score = max_score
seq_idx = seq_idx_start
tar_idx = tar_idx_start
while seq_idx - 1 >= 0 and tar_idx - 1 >= 0:
current_score += self.score(query, pssm, target, seq_idx - 1, tar_idx - 1, 1)
if current_score > max_score:
max_score = current_score
seq_idx_start = seq_idx - 1
hsp = (seq_idx - 1, tar_idx - 1, seq_idx_end - seq_idx_start + 1, max_score)
if current_score <= max_score - X:
break
seq_idx -= 1
tar_idx -= 1
return hsp
def two_hit_extension(self, query, pssm, target, seq_idx_first, tar_idx_first, offset_second, X, S):
score = self.score(query, pssm, target, seq_idx_first + offset_second, tar_idx_first + offset_second, 3)
hsp = self.extend_left(query, pssm, target, seq_idx_first+offset_second, tar_idx_first+offset_second, 2, score, X)
if hsp[0] > seq_idx_first+2:
return None
hsp = self.extend_right(query, pssm, target, hsp[0], hsp[1], hsp[2]-1, hsp[3], X)
if hsp[3] >= S:
return hsp
return None
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
hsps = dict()
word_dict = self.get_words_supercharged(sequence=query, pssm=pssm, T=T)
for seq_idx in range(self.get_length(query,pssm)-2):
for (word,score) in word_dict[seq_idx]:
index_dict = blast_db.get_sequences_supercharged(word)
for target, hit_list in index_dict.items():
for hit in hit_list:
hsp = self.extend_right(query, pssm, target, seq_idx, hit, 2, score, X)
hsp = self.extend_left(query, pssm, target, hsp[0], hsp[1], hsp[2] - 1, hsp[3], X)
if hsp[3] < S:
continue
if not target in hsps.keys():
hsps[target] = []
if not hsp in hsps[target]:
hsps[target].append(hsp)
return hsps
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
hsps = {}
word_dict = self.get_words_supercharged(sequence=query, pssm=pssm, T=T)
hits = {}
for seq_idx in range(self.get_length(query,pssm)-2):
for (word,score) in word_dict[seq_idx]:
index_dict = blast_db.get_sequences_supercharged(word)
for target, hit_list in index_dict.items():
if not target in hits.keys():
hits[target] = {}
for hit in hit_list:
diag = hit - seq_idx
if not diag in hits[target].keys():
hits[target][diag] = []
hits[target][diag].append((seq_idx, hit))
for target,diagonals in hits.items():
for diag_idx in diagonals:
hit_list = hits[target][diag_idx]
i = 0
while i < len(hit_list) - 1:
j = 1
offset = 0
while i+j < len(hit_list):
offset = hit_list[i + j][0] - hit_list[i][0]
if offset > 2:
break
j += 1
hsp = None
if offset > 2 and offset <= A:
hsp = self.two_hit_extension(query, pssm, target, hit_list[i][0], hit_list[i][1], offset, X, S)
if not hsp is None:
if not target in hsps.keys():
hsps[target] = []
if not hsp in hsps[target]:
hsps[target].append(hsp)
while i < len(hit_list) and hit_list[i][0] < hsp[0]+hsp[2]:
i += 1
else:
i += 1
return hsps<file_sep>##############
# Exercise 2.7
##############
posCharged=["R","K","H"]
negCharged=["D","E"]
#isHydro=["M","L","V","I","A","F","C"]
isHydro=["M","L","V","I","A","F","Y","W"]
isAro=["F","W","Y","H"]
isPo=["R","N","D","Q","E","H","K","S","T","Y"]
isPro=["P"]
conSulf=["C","M"]
isAc=["D","E"]
isBa=["R","H","K"]
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
if aa in posCharged:
return True
else:
return False
def isNegativelyCharged(aa):
if aa in negCharged:
return True
else:
return False
def isHydrophobic(aa):
return aa in isHydro
def isAromatic(aa):
return aa in isAro
def isPolar(aa):
return aa in isPo
def isProline(aa):
return aa in isPro
def containsSulfur(aa):
return aa in conSulf
def isAcid(aa):
return aa in isAc
def isBasic(aa):
return aa in isBa<file_sep>import numpy as np
from pathlib import Path
from collections import Counter
from itertools import product
import re
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.words = Counter()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
words = set(sequence[i:i+3] for i in range(0, len(sequence)) if i+3 <= len(sequence))
self.words.update(words)
self.sequences.append((sequence, words))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [seq for (seq, seq_words) in self.sequences if word in seq_words]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
avg_words = np.average(list(len(seq_words) for (_, seq_words) in self.sequences))
avg_seqs_per_word = np.average(list(self.words.values()))
return (len(self.sequences),
len(self.words),
np.rint(avg_words).astype(np.int64),
np.rint(avg_seqs_per_word).astype(np.int64))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.subst_matrix = substitution_matrix # scoring matrix
def get_words(self, *, sequence=None, pssm=None, T=11, with_pos=False):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
result = set()
words = [a+b+c for (a,b,c) in product(ALPHABET, ALPHABET, ALPHABET)]
length = len(sequence) if sequence is not None else len(pssm)
for idx in range(0, length-2):
for word in words:
if sequence is not None:
score = sum(self.subst_matrix[AA_TO_INT[sequence[idx+i]]][AA_TO_INT[word[i]]]
for i in range(0,3))
elif pssm is not None:
score = sum(pssm[idx+i][AA_TO_INT[word[i]]]
for i in range(0,3))
if score >= T:
if with_pos:
result.add((word, idx))
else:
result.add(word)
return result
def find_hsp(self, X, t_seq, q_word_idx, t_word_idx, query=None, pssm=None):
def calc_score(q_idx, t_idx):
"""Calculates the score of query index and target index"""
if query is not None:
return self.subst_matrix[AA_TO_INT[query[q_idx]]][AA_TO_INT[t_seq[t_idx]]]
if pssm is not None:
return pssm[q_idx][AA_TO_INT[t_seq[t_idx]]]
q_length = len(query if query is not None else pssm)
score = sum(calc_score(q_word_idx+i, t_word_idx+i)
for i in range(0,3))
# Extend right
r_offset = 2 # index of last position included
r_best = (score, r_offset)
while score > r_best[0] - X:
r_offset += 1
q_idx = q_word_idx + r_offset
t_idx = t_word_idx + r_offset
if q_idx >= q_length or t_idx >= len(t_seq):
r_offset -= 1
break
score += calc_score(q_idx, t_idx)
if score > r_best[0]:
r_best = (score, r_offset)
# Extend left
score = r_best[0]
l_offset = 0
l_best = (score, l_offset)
while score > l_best[0] - X:
l_offset -= 1
q_idx = q_word_idx + l_offset
t_idx = t_word_idx + l_offset
if q_idx < 0 or t_idx < 0:
l_offset += 1
break
score += calc_score(q_idx, t_idx)
if score > l_best[0]:
l_best = (score, l_offset)
length = (-l_best[1]) + r_best[1] + 1
total_score = l_best[0]
result = (q_word_idx+l_best[1], t_word_idx+l_best[1], length, total_score)
return result
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
def find_word_in_seq(seq, word):
"""Returns list of start indices"""
return [m.start() for m in re.finditer("(?={})".format(word), seq)]
result = {}
# Find all words that could match the query sequence with their
# positions in the query sequence.
q_words = self.get_words(sequence=query, T=T, with_pos=True) if query is not None else \
self.get_words(pssm=pssm, T=T, with_pos=True)
for (q_word, q_word_pos) in q_words:
# Find all DB sequences that contain the query word
t_seqs = blast_db.get_sequences(q_word)
for t_seq in t_seqs:
if not t_seq in result:
result[t_seq] = set()
# Find all occurences of the query word in the target sequence
t_words_idc = find_word_in_seq(t_seq, q_word)
for (q_word_idx, t_word_idx) in product([q_word_pos], t_words_idc):
if query is not None:
hsp = self.find_hsp(X, t_seq, q_word_idx, t_word_idx, query=query)
else:
hsp = self.find_hsp(X, t_seq, q_word_idx, t_word_idx, pssm=pssm)
if hsp[3] >= S:
result[t_seq].add(hsp)
for (seq, hsps) in list(result.items()):
if len(hsps) == 0:
del result[seq]
return result
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # Two-hit search is not implemented in this version; the dict below is
        # the placeholder from the exercise template, returned so the method
        # has a defined return value.
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
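
# A minimal usage sketch of the database class (sequences are made up for
# illustration).
if __name__ == '__main__':
    db = BlastDb()
    db.add_sequence('MGPRARPAFL')
    db.add_sequence('MGPRRRSPFL')
    print(db.get_db_stats())        # (#sequences, #words, words/seq, seqs/word)
    print(db.get_sequences('GPR'))  # both sequences contain the word 'GPR'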
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
aa_fr = self.get_abs_frequencies()
return sum(aa_fr[k] for k in aa_fr) / len(self.__sequences)
#return sum(len(seq) for seq in self.__sequences) / float(len(self.__sequences))
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.__sequences.append(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(seq)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
abs_fr = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
for seq in self.__sequences:
for aa in seq:
if aa in abs_fr:
abs_fr[aa] += 1
return abs_fr
def get_av_frequencies(self):
        # return number of occurrences normalized by length
aa_fr = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
for seq in self.__sequences:
for aa in seq:
if aa in aa_fr:
aa_fr[aa] += 1
aa_sum = sum(aa_fr[k] for k in aa_fr)
for key in aa_fr:
aa_fr[key] = aa_fr[key] / aa_sum
return aa_fr
<file_sep>import numpy as np
from copy import deepcopy
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.alignments = []
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed in NumPy 1.24
self.backtrace_matrix = [[[] for i in range(len(string1) + 1)] for j in range(len(string2) + 1)]
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
len_str_1 = len(self.string1)
len_str_2 = len(self.string2)
# Initialize first row and column with gap penalties
self.score_matrix[0, :] = np.arange(len_str_1 + 1) * self.gap_penalty
self.score_matrix[:, 0] = np.arange(len_str_2 + 1) * self.gap_penalty
for i in range(1, len_str_2 + 1):
self.backtrace_matrix[i][0] = [[i - 1, 0]]
for i in range(1, len_str_1 + 1):
self.backtrace_matrix[0][i] = [[0, i - 1]]
# Find alignments
for i2 in range(1, len_str_2 + 1):
for i1 in range(1, len_str_1 + 1):
coordinates = [[i2-1, i1-1],
[i2-1, i1],
[i2, i1-1]]
scores = [self.score_matrix[i2-1, i1-1] +
self.substitution_matrix[self.string1[i1-1]][self.string2[i2-1]],
self.score_matrix[i2-1, i1] + self.gap_penalty,
self.score_matrix[i2, i1-1] + self.gap_penalty]
max_score = max(scores)
                self.score_matrix[i2, i1] = max_score
for i_score, score in enumerate(scores):
if score == max_score:
self.backtrace_matrix[i2][i1].append(coordinates[i_score])
self.alignments = self.backtrace(index=[len_str_2, len_str_1], parents=self.backtrace_matrix[-1][-1], in_alignment=['', ''])
# Unpack alignments
self.alignments = [[self.alignments[i], self.alignments[i+1]] for i in range(0, len(self.alignments), 2)]
def backtrace(self, index, parents, in_alignment):
alignments = []
if not parents:
return [x[::-1] for x in in_alignment]
else:
for parent in parents:
alignment = deepcopy(in_alignment)
if parent[0] == index[0] - 1 and parent[1] == index[1] - 1:
alignment[0] += self.string1[index[1]-1]
alignment[1] += self.string2[index[0]-1]
elif parent[0] == index[0] -1:
alignment[0] += '-'
alignment[1] += self.string2[index[0]-1]
elif parent[1] == index[1] - 1:
alignment[0] += self.string1[index[1]-1]
alignment[1] += '-'
alignments.extend(self.backtrace(parent, self.backtrace_matrix[parent[0]][parent[1]], alignment))
return alignments
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
self.scores = []
for alignment in self.alignments:
score = 0
for i in range(len(alignment[0])):
if alignment[0][i] == '-' or alignment[1][i] == '-':
score += self.gap_penalty
else:
score += self.substitution_matrix[alignment[0][i]][alignment[1][i]]
self.scores.append(score)
return max(self.scores)
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return self.scores.count(max(self.scores))
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return [tuple(alignment) for alignment in self.alignments]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix.tolist()
if __name__ == '__main__':
blosum = {
'A': {'A': 4, 'C': 0, 'B': -2, 'E': -1, 'D': -2, 'G': 0, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 0, 'W': -3, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'C': {'A': 0, 'C': 9, 'B': -3, 'E': -4, 'D': -3, 'G': -3, 'F': -2, 'I': -1, 'H': -3, 'K': -3, 'M': -1, 'L': -1, 'N': -3, 'Q': -3, 'P': -3, 'S': -1, 'R': -3, 'T': -1, 'W': -2, 'V': -1, 'Y': -2, 'X': -2, 'Z': -3},
'B': {'A': -2, 'C': -3, 'B': 4, 'E': 1, 'D': 4, 'G': -1, 'F': -3, 'I': -3, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 3, 'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'E': {'A': -1, 'C': -4, 'B': 1, 'E': 5, 'D': 2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0, 'Q': 2, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4},
'D': {'A': -2, 'C': -3, 'B': 4, 'E': 2, 'D': 6, 'G': -1, 'F': -3, 'I': -3, 'H': -1, 'K': -1, 'M': -3, 'L': -4, 'N': 1, 'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'G': {'A': 0, 'C': -3, 'B': -1, 'E': -2, 'D': -1, 'G': 6, 'F': -3, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0, 'Q': -2, 'P': -2, 'S': 0, 'R': -2, 'T': -2, 'W': -2, 'V': -3, 'Y': -3, 'X': -1, 'Z': -2},
'F': {'A': -2, 'C': -2, 'B': -3, 'E': -3, 'D': -3, 'G': -3, 'F': 6, 'I': 0, 'H': -1, 'K': -3, 'M': 0, 'L': 0, 'N': -3, 'Q': -3, 'P': -4, 'S': -2, 'R': -3, 'T': -2, 'W': 1, 'V': -1, 'Y': 3, 'X': -1, 'Z': -3},
'I': {'A': -1, 'C': -1, 'B': -3, 'E': -3, 'D': -3, 'G': -4, 'F': 0, 'I': 4, 'H': -3, 'K': -3, 'M': 1, 'L': 2, 'N': -3, 'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': -1, 'W': -3, 'V': 3, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2, 'F': -1, 'I': -3, 'H': 8, 'K': -1, 'M': -2, 'L': -3, 'N': 1, 'Q': 0, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -2, 'V': -3, 'Y': 2, 'X': -1, 'Z': 0},
'K': {'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2, 'F': -3, 'I': -3, 'H': -1, 'K': 5, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -1, 'S': 0, 'R': 2, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 1},
'M': {'A': -1, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 0, 'I': 1, 'H': -2, 'K': -1, 'M': 5, 'L': 2, 'N': -2, 'Q': 0, 'P': -2, 'S': -1, 'R': -1, 'T': -1, 'W': -1, 'V': 1, 'Y': -1, 'X': -1, 'Z': -1},
'L': {'A': -1, 'C': -1, 'B': -4, 'E': -3, 'D': -4, 'G': -4, 'F': 0, 'I': 2, 'H': -3, 'K': -2, 'M': 2, 'L': 4, 'N': -3, 'Q': -2, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'A': -2, 'C': -3, 'B': 3, 'E': 0, 'D': 1, 'G': 0, 'F': -3, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -3, 'N': 6, 'Q': 0, 'P': -2, 'S': 1, 'R': 0, 'T': 0, 'W': -4, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'Q': {'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': 0, 'L': -2, 'N': 0, 'Q': 5, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -2, 'V': -2, 'Y': -1, 'X': -1, 'Z': 3},
'P': {'A': -1, 'C': -3, 'B': -2, 'E': -1, 'D': -1, 'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -2, 'L': -3, 'N': -2, 'Q': -1, 'P': 7, 'S': -1, 'R': -2, 'T': -1, 'W': -4, 'V': -2, 'Y': -3, 'X': -2, 'Z': -1},
'S': {'A': 1, 'C': -1, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'F': -2, 'I': -2, 'H': -1, 'K': 0, 'M': -1, 'L': -2, 'N': 1, 'Q': 0, 'P': -1, 'S': 4, 'R': -1, 'T': 1, 'W': -3, 'V': -2, 'Y': -2, 'X': 0, 'Z': 0},
'R': {'A': -1, 'C': -3, 'B': -1, 'E': 0, 'D': -2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 2, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -2, 'S': -1, 'R': 5, 'T': -1, 'W': -3, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'T': {'A': 0, 'C': -1, 'B': -1, 'E': -1, 'D': -1, 'G': -2, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 5, 'W': -2, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'W': {'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -4, 'G': -2, 'F': 1, 'I': -3, 'H': -2, 'K': -3, 'M': -1, 'L': -2, 'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 11, 'V': -3, 'Y': 2, 'X': -2, 'Z': -3},
'V': {'A': 0, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': -1, 'I': 3, 'H': -3, 'K': -2, 'M': 1, 'L': 1, 'N': -3, 'Q': -2, 'P': -2, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -2},
'Y': {'A': -2, 'C': -2, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 3, 'I': -1, 'H': 2, 'K': -2, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -2, 'T': -2, 'W': 2, 'V': -1, 'Y': 7, 'X': -1, 'Z': -2},
'X': {'A': 0, 'C': -2, 'B': -1, 'E': -1, 'D': -1, 'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1, 'L': -1, 'N': -1, 'Q': -1, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -2, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'A': -1, 'C': -3, 'B': 1, 'E': 4, 'D': 1, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0, 'Q': 3, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4}
}
identity = {
'A': {'A': 1, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'R': {'A': 0, 'R': 1, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'N': {'A': 0, 'R': 0, 'N': 1, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'D': {'A': 0, 'R': 0, 'N': 0, 'D': 1, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'C': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 1, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'E': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 1, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'Q': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 1, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'G': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 1, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'H': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 1, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'I': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 1, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'L': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 1, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'K': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 1, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'M': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 1,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'F': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 1, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'P': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 1, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'S': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 1, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'T': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 1, 'W': 0, 'Y': 0, 'V': 0},
'W': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 1, 'Y': 0, 'V': 0},
'Y': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 1, 'V': 0},
'V': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 1}
}
alignment = GlobalAlignment("CRYVPST", "WYVPSAT", -1, identity)
print("Best score: {}".format(alignment.get_best_score()))
print("Number of best alignments: {}".format(alignment.get_number_of_alignments()))
print("Alignments: {}".format(alignment.get_alignments()))
print("Score matrix: {}".format(alignment.get_score_matrix()))
<file_sep>import numpy as np
from copy import deepcopy
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.aligned_residues_1 = []
self.aligned_residues_2 = []
self.align()
def align(self):
"""
        Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for row, y in enumerate(self.string2):
for col, x in enumerate(self.string1):
diag = self.score_matrix[row][col] + self.substitution_matrix[x][y]
hori = self.score_matrix[row + 1][col] + self.gap_penalty
verti = self.score_matrix[row][col + 1] + self.gap_penalty
maxi = max(diag, hori, verti, 0)
self.score_matrix[row + 1][col + 1] = maxi
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
        return bool((self.score_matrix > 0).any())
def rec_back_step(self, row, col, result):
if row < 0 or col < 0:
return
x = self.string1[row - 1]
y = self.string2[col - 1]
current = self.score_matrix[col][row]
di = self.score_matrix[col - 1][row - 1]
ho = self.score_matrix[col][row - 1]
ve = self.score_matrix[col - 1][row]
current_result = deepcopy(result)
#print("String 1: ", self.string1)
#print("String 2: ", self.string2)
#print("x: ", self.string1[row - 1])
#print("y: ", self.string2[col - 1])
#print("Row: ", row, " Col: ", col)
if (row == 0 and col == 0) or current == 0:
current_result[0] = current_result[0][::-1]
current_result[1] = current_result[1][::-1]
tuple_result = tuple(current_result)
return tuple_result
        if current - self.substitution_matrix[x][y] == di:  # diagonal jump possible
            current_result[0] += x
            current_result[1] += y
            self.aligned_residues_1.append((x, row - 1))
            self.aligned_residues_2.append((y, col - 1))
            return self.rec_back_step(row - 1, col - 1, current_result)
        if current - self.gap_penalty == ho:  # horizontal jump possible
            current_result[0] += x
            current_result[1] += '-'
            self.aligned_residues_1.append((x, row - 1))
            self.aligned_residues_2.append(('-', col - 1))
            return self.rec_back_step(row - 1, col, current_result)
        if current - self.gap_penalty == ve:  # vertical jump possible
            current_result[0] += '-'
            current_result[1] += y
            self.aligned_residues_1.append(('-', row - 1))
            self.aligned_residues_2.append((y, col - 1))
            return self.rec_back_step(row, col - 1, current_result)
current_result[0] = current_result[0][:-1]
current_result[1] = current_result[1][:-1]
return
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if not self.has_alignment():
return ("", "")
else:
            maximum = np.where(self.score_matrix == np.amax(self.score_matrix))
            return self.rec_back_step(int(maximum[1][0]), int(maximum[0][0]), ["", ""])
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned,
False otherwise
"""
if string_number == 1:
for res in self.aligned_residues_1:
if res[0] == self.string1[residue_index] and res[1] == residue_index:
return True
return False
if string_number == 2:
for res in self.aligned_residues_2:
if res[0] == self.string2[residue_index] and res[1] == residue_index:
return True
return False
return False
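# Minimal usage sketch. The inline matrix below is an illustrative
# identity-style substitution matrix (match 4, mismatch -1), not one of the
# exercise's test matrices; strings and gap penalty are placeholders too.
if __name__ == "__main__":
    demo_matrix = {a: {b: (4 if a == b else -1) for b in "ACDEFGHIKLMNPQRSTVWY"}
                   for a in "ACDEFGHIKLMNPQRSTVWY"}
    demo = LocalAlignment("ARNDCEQGH", "NDCEQ", -6, demo_matrix)
    if demo.has_alignment():
        print("Local alignment:", demo.get_alignment())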
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db_sequences = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db_sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
matching_seq = [x for x in self.db_sequences if word in x]
self.matching_seq = matching_seq
return matching_seq
def get_words(self) -> object:
dict_words = {}
for s, seq in enumerate(self.db_sequences):
words = set(seq[i:i + 3] for i in range(0, len(seq), 1) if len(seq[i:i + 3]) > 2)
diff_words = list(words)
dict_words[s] = diff_words
return dict_words
def get_num_words(self):
words_list = self.get_words()
diff_word = set()
for key, value in words_list.items():
for v in value:
diff_word.add(v)
return len(diff_word)
def get_diff_words_in_db(self):
words_list = self.get_words()
diff_word = set()
for key, value in words_list.items():
for v in value:
diff_word.add(v)
return diff_word
def get_avg_num_words(self):
avg_num = 0
words_list = self.get_words()
if self.db_sequences:
avg_num = sum([len(v) for k,v in words_list.items()]) / len(self.db_sequences)
return int(round(avg_num))
def get_avg_num_seq(self):
num_words = self.get_diff_words_in_db()
num_seq_per_word = []
avg=0
for word in num_words:
num_seq_per_word.append(len(self.get_sequences(word)))
if num_words:
avg = sum(num_seq_per_word) / len(num_words)
return int(round(avg))
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_seq = len(self.db_sequences)
words_num = self.get_num_words()
avg_words = self.get_avg_num_words()
avg_seq_per_w= self.get_avg_num_seq()
stats_db = (num_seq, words_num, avg_words, avg_seq_per_w)
return stats_db
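# Minimal usage sketch for BlastDb (sequences below are illustrative only):
#
#   db = BlastDb()
#   db.add_sequence("MGPRARPAFL")
#   db.add_sequence("MGARSRPAFL")
#   db.get_sequences("RPA")   # -> both sequences (each contains 'RPA')
#   db.get_db_stats()         # -> (num_seqs, num_words, avg_words, avg_seqs)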
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix = substitution_matrix
@staticmethod
def get_query_words(seq):
words_list = []
for i in range(0, len(seq), 1):
if len(seq[i:i + 3]) > 2:
word = (seq[i:i + 3], i)
words_list.append(word)
return words_list
    def get_scores(self, seq, word):
        """
        Score two equal-length strings position by position using the
        substitution matrix (indexed via AA_TO_INT).
        """
        score = 0
        for i in range(len(word)):
            score += self.sub_matrix[AA_TO_INT[seq[i]]][AA_TO_INT[word[i]]]
        return score
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        words = set()
        sub_matrix = np.array(self.sub_matrix)
        query_seq = sequence
words_list = self.get_query_words(query_seq)
for word, start in words_list:
for i in range(0,20):
s1 = sub_matrix[AA_TO_INT[word[0]]][i]
for j in range(0, 20):
s2 = sub_matrix[AA_TO_INT[word[1]]][j]
for k in range(0, 20):
                        s3 = sub_matrix[AA_TO_INT[word[2]]][k]
                        score = s1 + s2 + s3
if score >= T:
candidate = ALPHABET[i] + ALPHABET[j] + ALPHABET[k]
words.add(candidate)
        return list(words)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
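# Minimal usage sketch. The uniform match/mismatch matrix below is an
# illustrative placeholder for a real 20x20 substitution matrix (e.g. BLOSUM62)
# indexed via AA_TO_INT; the query sequence is made up as well.
if __name__ == "__main__":
    placeholder_matrix = [[4 if i == j else -1 for j in range(20)] for i in range(20)]
    blast = Blast(placeholder_matrix)
    print(blast.get_words(sequence="MGPRARPAFL", T=11))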
<file_sep>import numpy as np
class Direction:
    Left = 0b100
    Diagonal = 0b010
    Up = 0b001
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros(
            (len(string2) + 1, len(string1) + 1), dtype=int)
self.alignment_paths = []
# The trace matrix encodes for each field (i, j) in the score matrix how
# we reached this field. We encode this information in a single integer as
# follows: We use a 1-hot-encoding where the first position marks the field
# 'left', the second marks 'diagonal' and the third one marks 'up', e.g.,
#
        # trace_matrix[i][j] = 0b101 <=> we reached (i, j) from (i-1, j) or from
        #                                (i, j-1)
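        # Example: trace_matrix[i][j] = 0b011 (Direction.Diagonal | Direction.Up)
        # means (i, j) was reached both diagonally from (i-1, j-1) and from (i-1, j).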
self.trace_matrix = np.zeros(
(len(string2) + 1, len(string1) + 1), dtype=np.int32)
self.align()
self.aligning_paths()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(len(self.string2) + 1):
self.score_matrix[i][0] = i * self.gap_penalty
self.trace_matrix[i][0] = Direction.Up
for j in range(len(self.string1) + 1):
self.score_matrix[0][j] = j * self.gap_penalty
self.trace_matrix[0][j] = Direction.Left
for i in range(1, len(self.string2) + 1):
for j in range(1, len(self.string1) + 1):
diag_score = self.score_matrix[i - 1][j - 1] + \
self.substitution_matrix[self.string2[i - 1]][self.string1[j-1]]
up_score = self.score_matrix[i - 1][j] + self.gap_penalty
left_score = self.score_matrix[i][j - 1] + self.gap_penalty
self.score_matrix[i][j] = max(diag_score, up_score, left_score)
# Trace the calculation
trace = 0
if self.score_matrix[i][j] == diag_score:
                    trace |= Direction.Diagonal
if self.score_matrix[i][j] == up_score:
trace |= Direction.Up
if self.score_matrix[i][j] == left_score:
trace |= Direction.Left
self.trace_matrix[i][j] = trace
def aligning_paths(self):
paths = []
while True:
i = len(self.string2)
j = len(self.string1)
path = []
path.append((i, j))
unique_path = True
while True:
num_directions = bin(self.trace_matrix[i][j]).count('1')
if num_directions != 1:
unique_path = False
                if self.trace_matrix[i][j] & Direction.Diagonal == Direction.Diagonal:
                    path.append((i - 1, j - 1))
                    if num_directions > 1:
                        self.trace_matrix[i][j] ^= Direction.Diagonal
i -= 1
j -= 1
elif self.trace_matrix[i][j] & Direction.Up == Direction.Up:
if num_directions > 1:
self.trace_matrix[i][j] ^= Direction.Up
path.append((i - 1, j))
i -= 1
elif self.trace_matrix[i][j] & Direction.Left == Direction.Left:
if num_directions > 1:
self.trace_matrix[i][j] ^= Direction.Left
path.append((i, j-1))
j -= 1
if j == 0 and i == 0:
paths.append(path)
break
if unique_path:
break
self.alignment_paths = paths
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignment_paths)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
alignments = []
for p in self.alignment_paths:
path = p[:-1][::-1]
alignment = ["", ""]
old_i = 0
old_j = 0
for pair in path:
i = pair[0]
j = pair[1]
if old_i != i and old_j != j:
alignment[0] += self.string1[j-1]
alignment[1] += self.string2[i-1]
elif old_j == j:
alignment[0] += "-"
alignment[1] += self.string2[i-1]
else:
alignment[0] += self.string1[j-1]
alignment[1] += "-"
old_i = i
old_j = j
alignments.append((alignment[0], alignment[1]))
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
        return self.score_matrix.tolist()
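# Minimal usage sketch. The inline matrix is an illustrative identity-style
# scoring scheme (match 1, mismatch 0); strings and gap penalty are
# placeholders, not the exercise's test data.
if __name__ == "__main__":
    demo_matrix = {a: {b: (1 if a == b else 0) for b in "ACDEFGHIKLMNPQRSTVWY"}
                   for a in "ACDEFGHIKLMNPQRSTVWY"}
    ga = GlobalAlignment("SCYTHE", "SCTHE", -1, demo_matrix)
    print("Best score:", ga.get_best_score())
    print("Alignments:", ga.get_alignments())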
<file_sep>##############
# Exercise 2.7
##############
#Pos,Neg,Hydro,Aromatic,Polar, Proline, Sulfur, Acid, Basic
all_aa = {'A': ['hydro'],
'R': ['pos', 'polar', 'basic'],
'N': ['polar'],
'D': ['neg', 'polar', 'acid'],
'C': ['sulfur'],
'Q': ['polar'],
'E': ['neg', 'polar', 'acid'],
'G': [],
'H': ['pos', 'aromatic', 'polar', 'basic'],
'I': ['hydro'],
'L': ['hydro'],
'K': ['pos', 'polar', 'basic'],
'M': ['hydro', 'sulfur'],
'F': ['hydro', 'aromatic'],
'P': ['proline'],
'S': ['polar'],
'T': ['polar'],
'W': ['hydro', 'aromatic'],
'Y': ['hydro', 'aromatic', 'polar'],
'V': ['hydro']}
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return 'pos' in all_aa[aa]
def isNegativelyCharged(aa):
return 'neg' in all_aa[aa]
def isHydrophobic(aa):
return 'hydro' in all_aa[aa]
def isAromatic(aa):
return 'aromatic' in all_aa[aa]
def isPolar(aa):
return 'polar' in all_aa[aa]
def isProline(aa):
return 'proline' in all_aa[aa]
def containsSulfur(aa):
return 'sulfur' in all_aa[aa]
def isAcid(aa):
return 'acid' in all_aa[aa]
def isBasic(aa):
return 'basic' in all_aa[aa]<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
# Tip: This module might be useful for parsing...
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio.PDB.Polypeptide import PPBuilder
from Bio.PDB.Polypeptide import is_aa
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__(self, path):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
self.structure = self.CIF_PARSER.get_structure(
'7AHL', path) # Parse the structure once and re-use it in the functions below
# 3.8 Chains
def get_number_of_chains(self):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
number_of_chains = 0
for model in self.structure:
for chain in model:
number_of_chains += 1
return number_of_chains
# 3.9 Sequence
def get_sequence(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
ppb = PPBuilder()
for pp in ppb.build_peptides(self.structure[0][chain_id]):
            return str(pp.get_sequence())
# 3.10 Water molecules
def get_number_of_water_molecules(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
number_of_water = 0
for residue in self.structure[0][chain_id]:
if residue.get_full_id()[3][0] == 'W':
number_of_water += 1
return number_of_water
# 3.11 C-Alpha distance
def get_ca_distance(self, chain_id_1, index_1, chain_id_2, index_2):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
atom1 = self.structure[0][chain_id_1][index_1]['CA']
atom2 = self.structure[0][chain_id_2][index_2]['CA']
return int(atom1 - atom2)
# 3.12 Contact Map
def get_contact_map(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
chain = self.structure[0][chain_id] # get the chain
# calculate the chain using the residues and removing water molecules
length = len(chain)-self.get_number_of_water_molecules(chain_id)
# create an empty contact map
contact_map = np.zeros((length, length), dtype=np.float32)
for residue1 in chain: # loop through the chain
res_name1 = residue1.get_resname()
res_id1 = residue1.get_id()[1]
for residue2 in chain:
res_name2 = residue2.get_resname()
res_id2 = residue2.get_id()[1]
if((res_name1 != 'HOH') and (res_name2 != 'HOH')): # only look at residues that are not water
contact_map[res_id1 - 1][res_id2 - 1] = residue1['CA'] - residue2['CA']
        return contact_map.astype(int)  # cast to integer values
# 3.13 B-Factors
def get_bfactors(self, chain_id):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
length = 0
for residue in self.structure[0][chain_id].get_residues():
if is_aa(residue):
length += 1
b_factors = np.zeros(length, dtype=np.float32) # setup array
counter = 0
for residue in self.structure[0][chain_id].get_residues():
if is_aa(residue):
atom_count = 0
atom_b_factor = 0
for atom in residue.get_atoms():
atom_count += 1
atom_b_factor += atom.get_bfactor()
atom_b_factor /= atom_count
b_factors[counter] = atom_b_factor
counter += 1
b_factors = (b_factors - np.nanmean(b_factors))/np.nanstd(b_factors)
        return b_factors.astype(int)  # cast to integer values
def main():
print('PDB parser class.')
parser = PDB_Parser('tests/7ahl.cif')
# protein.get_number_of_chains()
# print protein.get_sequence('A')
# protein.get_number_of_water_molecules('A')
# protein.get_ca_distance('A',10,'B',10)
parser.get_contact_map('B')
# print protein.get_bfactors('B')
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from copy import deepcopy
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.alignments = []
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.backtrace_matrix = [[[] for i in range(len(string1) + 1)] for j in range(len(string2) + 1)]
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
len_str_1 = len(self.string1)
len_str_2 = len(self.string2)
# Initialize first row and column with gap penalties
self.score_matrix[0, :] = 0
self.score_matrix[:, 0] = 0
for i in range(1, len_str_2 + 1):
self.backtrace_matrix[i][0] = [[i - 1, 0]]
for i in range(1, len_str_1 + 1):
self.backtrace_matrix[0][i] = [[0, i - 1]]
# Find alignments
for i2 in range(1, len_str_2 + 1):
for i1 in range(1, len_str_1 + 1):
coordinates = [[i2 - 1, i1 - 1],
[i2 - 1, i1],
[i2, i1 - 1]]
scores = [self.score_matrix[i2 - 1, i1 - 1] +
self.substitution_matrix[self.string1[i1 - 1]][self.string2[i2 - 1]],
self.score_matrix[i2 - 1, i1] + self.gap_penalty,
self.score_matrix[i2, i1 - 1] + self.gap_penalty]
                # Smith-Waterman: clamp negative cell scores to zero during the
                # fill so that negative values cannot propagate.
                max_score = max(max(scores), 0)
                self.score_matrix[i2, i1] = max_score
                for i_score, score in enumerate(scores):
                    if max_score > 0 and score == max_score:
                        self.backtrace_matrix[i2][i1].append(coordinates[i_score])
        # Initialized here so is_residue_aligned also works when no local
        # alignment exists.
        self.aligned_indices = [[False] * len_str_1, [False] * len_str_2]
        if self.score_matrix.sum() == 0:
            self.alignment = ['', '']
else:
max_score = self.score_matrix.max()
i_max_score = [0, 0]
for i in range(self.score_matrix.shape[0]):
for j in range(self.score_matrix.shape[1]):
if self.score_matrix[i, j] == max_score:
i_max_score = [i, j]
            alignments = self.backtrace(index=i_max_score,
                                        parents=self.backtrace_matrix[i_max_score[0]][i_max_score[1]],
                                        in_alignment=['', ''], path=[],
                                        aligned_indices=self.aligned_indices)
            # backtrace returns a list of alignments; keep the first one
            self.alignment = alignments[0] if alignments else ['', '']
def backtrace(self, index, parents, in_alignment, path, aligned_indices):
alignments = []
path.append(index)
        if self.score_matrix[index[0], index[1]] == 0:
            # Base case: return a list containing one finished alignment
            return [[x[::-1] for x in in_alignment]]
else:
            for parent in parents:
                alignment = deepcopy(in_alignment)
                if parent[0] == index[0] - 1 and parent[1] == index[1] - 1:
                    # Diagonal step: both residues are aligned
                    aligned_indices[0][index[1] - 1] = True
                    aligned_indices[1][index[0] - 1] = True
                    alignment[0] += self.string1[index[1] - 1]
                    alignment[1] += self.string2[index[0] - 1]
                elif parent[0] == index[0] - 1:
                    # Vertical step: gap in string1
                    aligned_indices[1][index[0] - 1] = True
                    alignment[0] += '-'
                    alignment[1] += self.string2[index[0] - 1]
                elif parent[1] == index[1] - 1:
                    # Horizontal step: gap in string2
                    aligned_indices[0][index[1] - 1] = True
                    alignment[0] += self.string1[index[1] - 1]
                    alignment[1] += '-'
                alignments.extend(self.backtrace(parent, self.backtrace_matrix[parent[0]][parent[1]], alignment, path, aligned_indices))
            return alignments
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.alignment != ['', '']
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return tuple(self.alignment)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned,
False otherwise
"""
return self.aligned_indices[string_number-1][residue_index]
if __name__ == "__main__":
blosum = {
'A': {'A': 4, 'C': 0, 'B': -2, 'E': -1, 'D': -2, 'G': 0, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1,
'N': -2, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 0, 'W': -3, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'C': {'A': 0, 'C': 9, 'B': -3, 'E': -4, 'D': -3, 'G': -3, 'F': -2, 'I': -1, 'H': -3, 'K': -3, 'M': -1, 'L': -1,
'N': -3, 'Q': -3, 'P': -3, 'S': -1, 'R': -3, 'T': -1, 'W': -2, 'V': -1, 'Y': -2, 'X': -2, 'Z': -3},
'B': {'A': -2, 'C': -3, 'B': 4, 'E': 1, 'D': 4, 'G': -1, 'F': -3, 'I': -3, 'H': 0, 'K': 0, 'M': -3, 'L': -4,
'N': 3, 'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'E': {'A': -1, 'C': -4, 'B': 1, 'E': 5, 'D': 2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -2, 'L': -3,
'N': 0, 'Q': 2, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4},
'D': {'A': -2, 'C': -3, 'B': 4, 'E': 2, 'D': 6, 'G': -1, 'F': -3, 'I': -3, 'H': -1, 'K': -1, 'M': -3, 'L': -4,
'N': 1, 'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'G': {'A': 0, 'C': -3, 'B': -1, 'E': -2, 'D': -1, 'G': 6, 'F': -3, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4,
'N': 0, 'Q': -2, 'P': -2, 'S': 0, 'R': -2, 'T': -2, 'W': -2, 'V': -3, 'Y': -3, 'X': -1, 'Z': -2},
'F': {'A': -2, 'C': -2, 'B': -3, 'E': -3, 'D': -3, 'G': -3, 'F': 6, 'I': 0, 'H': -1, 'K': -3, 'M': 0, 'L': 0,
'N': -3, 'Q': -3, 'P': -4, 'S': -2, 'R': -3, 'T': -2, 'W': 1, 'V': -1, 'Y': 3, 'X': -1, 'Z': -3},
'I': {'A': -1, 'C': -1, 'B': -3, 'E': -3, 'D': -3, 'G': -4, 'F': 0, 'I': 4, 'H': -3, 'K': -3, 'M': 1, 'L': 2,
'N': -3, 'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': -1, 'W': -3, 'V': 3, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2, 'F': -1, 'I': -3, 'H': 8, 'K': -1, 'M': -2, 'L': -3,
'N': 1, 'Q': 0, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -2, 'V': -3, 'Y': 2, 'X': -1, 'Z': 0},
'K': {'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2, 'F': -3, 'I': -3, 'H': -1, 'K': 5, 'M': -1, 'L': -2,
'N': 0, 'Q': 1, 'P': -1, 'S': 0, 'R': 2, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 1},
'M': {'A': -1, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 0, 'I': 1, 'H': -2, 'K': -1, 'M': 5, 'L': 2,
'N': -2, 'Q': 0, 'P': -2, 'S': -1, 'R': -1, 'T': -1, 'W': -1, 'V': 1, 'Y': -1, 'X': -1, 'Z': -1},
'L': {'A': -1, 'C': -1, 'B': -4, 'E': -3, 'D': -4, 'G': -4, 'F': 0, 'I': 2, 'H': -3, 'K': -2, 'M': 2, 'L': 4,
'N': -3, 'Q': -2, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'A': -2, 'C': -3, 'B': 3, 'E': 0, 'D': 1, 'G': 0, 'F': -3, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -3,
'N': 6, 'Q': 0, 'P': -2, 'S': 1, 'R': 0, 'T': 0, 'W': -4, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'Q': {'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': 0, 'L': -2,
'N': 0, 'Q': 5, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -2, 'V': -2, 'Y': -1, 'X': -1, 'Z': 3},
'P': {'A': -1, 'C': -3, 'B': -2, 'E': -1, 'D': -1, 'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -2,
'L': -3, 'N': -2, 'Q': -1, 'P': 7, 'S': -1, 'R': -2, 'T': -1, 'W': -4, 'V': -2, 'Y': -3, 'X': -2,
'Z': -1},
'S': {'A': 1, 'C': -1, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'F': -2, 'I': -2, 'H': -1, 'K': 0, 'M': -1, 'L': -2,
'N': 1, 'Q': 0, 'P': -1, 'S': 4, 'R': -1, 'T': 1, 'W': -3, 'V': -2, 'Y': -2, 'X': 0, 'Z': 0},
'R': {'A': -1, 'C': -3, 'B': -1, 'E': 0, 'D': -2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 2, 'M': -1, 'L': -2,
'N': 0, 'Q': 1, 'P': -2, 'S': -1, 'R': 5, 'T': -1, 'W': -3, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'T': {'A': 0, 'C': -1, 'B': -1, 'E': -1, 'D': -1, 'G': -2, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1,
'N': 0, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 5, 'W': -2, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'W': {'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -4, 'G': -2, 'F': 1, 'I': -3, 'H': -2, 'K': -3, 'M': -1, 'L': -2,
'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 11, 'V': -3, 'Y': 2, 'X': -2, 'Z': -3},
'V': {'A': 0, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': -1, 'I': 3, 'H': -3, 'K': -2, 'M': 1, 'L': 1,
'N': -3, 'Q': -2, 'P': -2, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -2},
'Y': {'A': -2, 'C': -2, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 3, 'I': -1, 'H': 2, 'K': -2, 'M': -1, 'L': -1,
'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -2, 'T': -2, 'W': 2, 'V': -1, 'Y': 7, 'X': -1, 'Z': -2},
'X': {'A': 0, 'C': -2, 'B': -1, 'E': -1, 'D': -1, 'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1, 'L': -1,
'N': -1, 'Q': -1, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -2, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'A': -1, 'C': -3, 'B': 1, 'E': 4, 'D': 1, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3,
'N': 0, 'Q': 3, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4}
}
identity = {
'A': {'A': 1, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'R': {'A': 0, 'R': 1, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'N': {'A': 0, 'R': 0, 'N': 1, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'D': {'A': 0, 'R': 0, 'N': 0, 'D': 1, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'C': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 1, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'E': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 1, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'Q': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 1, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'G': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 1, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'H': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 1, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'I': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 1, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'L': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 1, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'K': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 1, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'M': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 1,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'F': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 1, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'P': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 1, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'S': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 1, 'T': 0, 'W': 0, 'Y': 0, 'V': 0},
'T': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 1, 'W': 0, 'Y': 0, 'V': 0},
'W': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 1, 'Y': 0, 'V': 0},
'Y': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 1, 'V': 0},
'V': {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'E': 0, 'Q': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0, 'M': 0,
'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 1}
}
    alignment = LocalAlignment("ARNDCEQGHI", "DDCEQHG", -6, blosum)
    print("Local alignment:", alignment.get_alignment())
<file_sep>import numpy as np
#from tests.matrices import MATRICES
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# init cumulative score
for i in range(1, self.score_matrix.shape[0], 1):
self.score_matrix[i, 0] = self.score_matrix[i - 1, 0] + self.gap_penalty
for j in range(1, self.score_matrix.shape[1], 1):
self.score_matrix[0, j] = self.score_matrix[0, j - 1] + self.gap_penalty
# backtracking
self.backtrack = [[[] for j in range(self.score_matrix.shape[1])] for i in range(self.score_matrix.shape[0])]
for i in range(1, self.score_matrix.shape[0], 1):
for j in range(1, self.score_matrix.shape[1], 1):
d = self.score_matrix[i - 1, j - 1] + self.substitution_matrix[self.string2[i - 1]][self.string1[j - 1]]
v = self.score_matrix[i - 1, j] + self.gap_penalty
h = self.score_matrix[i, j - 1] + self.gap_penalty
self.score_matrix[i, j] = max([d, v, h])
if self.score_matrix[i, j] == d:
self.backtrack[i][j].append('d')
if self.score_matrix[i, j] == v:
self.backtrack[i][j].append('v')
if self.score_matrix[i, j] == h:
self.backtrack[i][j].append('h')
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1, -1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
def rec_get_alignments(i, j):
if i == 0 and j == 0:
return [('', '')]
res = []
for dir in self.backtrack[i][j]:
if dir == 'd':
                    l = rec_get_alignments(i - 1, j - 1)
                    res += [(x + self.string1[j - 1], y + self.string2[i - 1]) for (x, y) in l]
elif dir == 'v':
                    l = rec_get_alignments(i - 1, j)
                    res += [(x + '-', y + self.string2[i - 1]) for (x, y) in l]
else:
                    l = rec_get_alignments(i, j - 1)
                    res += [(x + self.string1[j - 1], y + '-') for (x, y) in l]
return res
return rec_get_alignments(self.score_matrix.shape[0] - 1, self.score_matrix.shape[1] - 1)
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix.tolist()
"""
if __name__ == '__main__':
a = GlobalAlignment("SCYTHE", "SCTHE", -6, MATRICES["blosum"])
print(a.score_matrix)
print(a.get_alignments())
print(a.get_best_score())
"""
<file_sep>import numpy as np
from collections import Counter
import math
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
def rounde(floate) :
    """Round to the nearest integer, with halves rounding up."""
    flor = math.floor(floate)
    if floate - flor >= 0.5 :
        return flor + 1
    return flor
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
        if not sequences :
            raise TypeError
        length = len(sequences[0])
        for seq in sequences :
            if len(seq) != length :
                raise TypeError
            for let in seq :
                if let not in ALPHABET :
                    raise TypeError
        self.sequences = sequences
def count_observed_and_gaps(self, use_sequence_weights, redistribute_gaps, bg_m) :
seq = self.sequences
c_matrix = np.zeros((len(seq[0]), 21))
if not use_sequence_weights :
for i in range(len(seq[0])) :
w = []
for k in range(len(seq)) :
w.append(seq[k][i])
counted = Counter(w)
for ju in counted :
c_matrix[i][AA_TO_INT[ju]] = counted[ju]
else:
w = self.get_sequence_weights()
for i in range(len(seq[0])) :
for j in range(len(c_matrix[i])) :
nn = 0
for sequ in seq :
if INT_TO_AA[j] == sequ[i] :
c_matrix[i, AA_TO_INT[sequ[i]]] += w[nn]
nn += 1
bg_freq = self.bg_freq(bg_m)
if redistribute_gaps :
for i in range(len(seq[0])) :
row = c_matrix[i]
for j in range(len(row)-1) :
c_matrix[i, j] += row[GAP_INDEX] * bg_freq[j]
c_matrix = np.delete(c_matrix, -1, axis = 1)
k = 0
for i in c_matrix :
summ = 0
for j in i :
summ += j
for n in range(len(i)) :
c_matrix[k][n] /= (summ * bg_freq[n])
k += 1
return c_matrix
def bg_freq(self, bg_m) :
res = np.zeros(len(bg_m))
for p in range(len(bg_m)) :
som = 0
for elem in bg_m[p] :
som += elem
res[p] = som
return res
def get_pseudo_counts(self, bg_m, matrix) : #Returns a len(seq[0] x 20 matrix)
seq = self.sequences
N = len(seq[0])
pc = np.zeros((N, 20))
for i in range(N) :
for a in range(20) :
som = 0
for j in range(20) :
som += matrix[i][j] * bg_m[j][a]
pc[i,a] = som
return pc
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((20, 20))
seq = self.sequences
        if bg_matrix is None :
            bg_matrix = np.full((20, 20), 0.0025)
#Count & redistribute
c_matrix = self.count_observed_and_gaps(use_sequence_weights, redistribute_gaps, bg_matrix)
#Get pseudocounts
if add_pseudocounts :
pssm = np.zeros(c_matrix.shape)
pc_matrix = self.get_pseudo_counts(bg_matrix, c_matrix)
ind = self.get_number_of_observations()
alpha = ind-1
for i in range(len(c_matrix)) :
for j in range(len(c_matrix[i])) :
ss = alpha + beta
pssm[i][j] = (alpha * c_matrix[i][j] + beta * pc_matrix[i][j])/ss
else :
pssm = c_matrix
for i in range(len(pssm)) :
for k in range(len(pssm[i])) :
if pssm[i][k] == 0 :
pssm[i][k] = -20
else :
pssm[i][k] = rounde(2 * math.log(pssm[i][k], 2))
#Delete gaps / adjusting to the primary sequence
i = len(seq[0])-1
while i >= 0 :
if seq[0][i] == "-" :
pssm = np.delete(pssm, i, axis = 0)
i -= 1
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
sequences = self.sequences
number = len(sequences)
length = len(sequences[0])
return (number, length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-","")
def get_weights_matrix(self) :
seq = self.sequences
n = len(seq[0])
        m = np.zeros((n, len(seq) + 1), dtype = np.float64)
for i in range(n) :
w = []
for k in range(len(seq)) :
w.append(seq[k][i])
stats = Counter(w)
r = len(stats)
m[i, len(seq)] = r
for j in range(len(seq)) :
ll = 1/(r * stats[w[j]])
m[i,j] = ll
return m.astype(float)
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
seq = self.sequences
m = self.get_weights_matrix()
weights = np.zeros(len(seq))
for j in range(len(seq)) :
summ = 0
for i in range(len(seq[0])) :
r = m[i, len(seq)]
if r > 1 :
summ += m[i,j]
weights[j] = summ
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
seq = self.sequences
m = self.get_weights_matrix()
r = m[:, len(seq)]
nie = len(seq[0])
summ = 0
for val in r :
summ += val
L = summ/nie
return L.astype(np.float64)
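# Minimal usage sketch for MSA with a toy alignment (three short gapped
# sequences; values are illustrative, not from the exercise tests):
#
#   toy = MSA(["SE-AN", "SE-ES", "SEVEN"])
#   toy.get_size()              # -> (3, 5)
#   toy.get_primary_sequence()  # -> 'SEAN'
#   toy.get_sequence_weights()
#   toy.get_pssm()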
msa= [
"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLK------------VAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----------MGEIAVGLNKGHQVTKKA----------GTPRPSRRKGFLSQRVKKVRAVVREVAGWAPYERRVMELLKVGKD---KRALKMCKRKLGTHMRGKKKREEMAGVLRKMQAASKGE---------",
"----MAPKQPNTGLFVGLNKGHIVTKKE----------LAPRPSDRKGKTSKRTHFVRNLIREVAGFAPYEKRITELLKVGKD---KRALKVRQEKVGHSQESKEEER--GDVQCSP--------PDEGWWWY",
"---------MAPGLVVGLNKGKVLTKRQ----------LPERPSRRKGQLSKRTSFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MGVQYKLAVGLGKGHKVTKNE----------YKPRPSRRKGALSKHTRFVRDLIREVCGFAPFERRAMELLKVSKD---KRALKFIKKRLGTHLRGKRKRDELSNVLVAQRKAAAHKEKTEHK---",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------GKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"---------MAPGLVVGLNKGKTLTKRQ----------LPERPSRRKGHLSKRTAFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MAIRYPMAVGLNKGHKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCAFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGYKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MVVRYPMAVGLNKGHKVTKNV----------SKPKHSRRRGRLTKHAKFARDLIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNTLAAMRKAAAKKE--------",
"-------MAIRYPMAVGLKKGHPVTKNV----------TKPKHSRRGGRLTKHSKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNILAAMRKAAAKKE--------",
"---MAKEAPAKTGLAVGLNKGHKTTARV----------VKPRVSRTKGHLSKRTAFVREVVKEVAGLAPYERRVIELLRNSKD---KRARKLAKKRLGTFGRAKRKVDELQRVIAESRRAH------------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRSRLTNHTKFVRDMIREVCGFAPYERRAMELLKVSKS---KRALKFIKKRVGTHIRAKRKREELSNVLAAMEEAAAKKD--------",
"-----MSGPGIEGLAVGLNKGHAATQLP----------VKQRQNRHKGVASKKTKIVRELVREITGFAPYERRVLEMLRISKD---KRALKFLKRRIGTHRRAKGKREELQNVIIAQRKAHK-----------",
"--------MAKSGIAAGVNKGRKTTAKE----------VAPKISYRKGASSQRTVFVRSIVKEVAGLAPYERRLIELIRNAGE---KRAKKLAKKRLGTHKRALRKVEEMTQVIAESRRH-------------",
"-------MAVRYELAIGLNKGHKTSKIRNVKYTGDKKVKGLRGSRLKNIQTRHTKFMRDLVREVVGHAPYEKRTMELLKVSKD---KRALKFLKRRLGTHIRAKRKREELSNILTQLRKAQTHAK--------",
"-------MAVKTGIAIGLNKGKKVTQMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"-------MTVKTGIAIGLNKGKKVTSMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"---------MAKGQAVGINKGFITTQLE-------KKLQKHSAVQRKGKLGKRVALVRQVIREVTGFAPYEKRIIELIKAGSAKDSKKATKIARKRLGTHRRAKVKKALLEEAVRAQRKK-------------",
"MSSAATKPVKRSGIIKGFNKGHAVAKRT------------VTSTFKKQVVTKRVAAIRDVIREISGFSPYERRVSELLKSGLD---KRALKVAKKRLGSIQAGKKKRDDIANINRKASAK-------------",
"MKNA--------------------YKKVRVRYPVKRPDVKRKQRGPRAETQESRFLAAAVADEISGLSPLEKKAISLLEAKNN---NKAQKLLRKRLGSHKRAVAKVEKLARMLLEK----------------"
]
#test = MSA(msa)
#print(test.get_number_of_observations())<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input have to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio import SeqIO # Tip: This module might be useful for parsing...
############ Exercise 3: SwissProt ##########
class SwissProt_Parser:
PARSER = SeqIO
def __init__( self, path, frmt='uniprot-xml' ):
'''
Initialize every SwissProt_Parser with a path to a XML-formatted UniProt file.
An example file is included in the repository (P09616.xml).
Tip: Store the parsed XML entry in an object variable instead of parsing it
again & again ...
'''
self.sp_id = None
self.sp_name = None
self.sp_sequence_length = None
self.sp_organism = None
self.sp_subcellularlocation_location = None
self.sp_pdb_support = []
self.sp_anno = SeqIO.parse(path, frmt) # Parse the XML file once and re-use it in the functions below
for s in self.sp_anno:
#print(s)
self.sp_id = s.id
self.sp_name = s.name
self.sp_sequence_length = len(s.seq)
self.sp_organism = s.annotations['organism']
self.sp_subcellularlocation_location = s.annotations['comment_subcellularlocation_location']
dbxrefs = s.dbxrefs
for dbxref in dbxrefs:
if 'PDB:' in dbxref:
self.sp_pdb_support.append(dbxref[4:])
# 3.2 SwissProt Identifiers
def get_sp_identifier( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Unique SwissProt identifier for the given xml file
'''
identifier = self.sp_id
return identifier
def get_sp_name(self):
identifier = self.sp_name
return identifier
# 3.3 SwissProt Sequence length
def get_sp_sequence_length( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return sequence length of the UniProt entry as an integer.
'''
seq_len = self.sp_sequence_length
return seq_len
# 3.4 Organism
def get_organism( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
            Return the name of the organism as stated in the corresponding field
of the XML data. Return value has to be a string.
'''
organism = self.sp_organism
return organism
# 3.5 Localizations
def get_localization( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return the name of the subcellular localization as stated in the
corresponding field.
Return value has to be a list of strings.
'''
localization = self.sp_subcellularlocation_location
return localization
# 3.6 Cross-references to PDB
def get_pdb_support( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Returns a list of all PDB IDs which support the annotation of the
given SwissProt XML file. Return the PDB IDs as list.
'''
pdb_ids = self.sp_pdb_support
return pdb_ids
def main():
print('SwissProt XML Parser class')
parser = SwissProt_Parser('./tests/P09616.xml')
print("id: {0}, name: {1}, length: {2}, organism: {3}, locations: {4}, dbxrefs: {5}".format(parser.get_sp_identifier(), parser.get_sp_name(), parser.get_sp_sequence_length(), parser.get_organism(), parser.get_localization(), parser.get_pdb_support()))
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
n, m = self.score_matrix.shape
self.score_matrix[0][0] = 0
for i in range(1, n, 1):
self.score_matrix[i][0] = 0
for j in range(1, m, 1):
self.score_matrix[0][j] = 0
for i in range(1, n, 1):
for j in range(1, m, 1):
c1 = self.string2[i - 1]
c2 = self.string1[j - 1]
score = self.score_matrix[i - 1][j - 1] + self.substitution_matrix[c1][c2]
self.score_matrix[i][j] = max(
self.score_matrix[i][j - 1] + self.gap_penalty,
self.score_matrix[i - 1][j] + self.gap_penalty,
score,
0
)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.score_matrix.max() > 0
def construct_alignments(self, i, j, tmp1, tmp2):
if i == 0 or j == 0:
return tmp1, tmp2
now = self.score_matrix[i][j]
if now == 0:
return tmp1, tmp2
c1 = self.string2[i - 1]
c2 = self.string1[j - 1]
if now == self.score_matrix[i][j - 1] + self.gap_penalty:
return self.construct_alignments(i, j - 1, c2 + tmp1, "-" + tmp2)
if now == self.score_matrix[i - 1][j] + self.gap_penalty:
return self.construct_alignments(i - 1, j, "-" + tmp1, c1 + tmp2)
if now == self.score_matrix[i - 1][j - 1] + self.substitution_matrix[c1][c2]:
return self.construct_alignments(i - 1, j - 1, c2 + tmp1, c1 + tmp2)
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
max_i = np.argmax(self.score_matrix)
n, m = self.score_matrix.shape
i = int(max_i / m)
j = max_i % m
return self.construct_alignments(i, j, "", "")
def contains_residue(self, i, j, string_number, residue_index):
if (string_number == 1 and j < residue_index) or (string_number == 2 and i < residue_index):
return False
if (string_number == 1 and j == residue_index) or (string_number == 2 and i == residue_index):
return True
if i == 0 or j == 0:
return False
now = self.score_matrix[i][j]
if now == 0:
return False
c1 = self.string2[i - 1]
c2 = self.string1[j - 1]
if now == self.score_matrix[i][j - 1] + self.gap_penalty:
return self.contains_residue(i, j - 1, string_number, residue_index)
if now == self.score_matrix[i - 1][j] + self.gap_penalty:
return self.contains_residue(i - 1, j, string_number, residue_index)
if now == self.score_matrix[i - 1][j - 1] + self.substitution_matrix[c1][c2]:
return self.contains_residue(i - 1, j - 1, string_number, residue_index)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
max_i = np.argmax(self.score_matrix)
n, m = self.score_matrix.shape
i = int(max_i / m)
j = max_i % m
if not self.has_alignment():
return False
return self.contains_residue(i, j, string_number, residue_index)
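# Minimal usage sketch (the scoring dict below is a toy identity-style
# matrix made up for illustration, not the exercise's real substitution
# matrix):
if __name__ == '__main__':
    toy = {a: {b: (3 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    la = LocalAlignment('ACGT', 'AGT', -2, toy)
    print(la.has_alignment())  # True: a positive-scoring local match exists
    print(la.get_alignment())  # tuple of the two aligned substrings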
<file_sep>import numpy as np
from math import log
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
        if not sequences:
            raise TypeError('MSA must contain at least one sequence')
temp = len(sequences[0])
for alignment in sequences:
if temp != len(alignment):
raise TypeError('MSA alignments must have same length')
            if not self.check_alphabet(alignment):
                raise TypeError('MSA alignments contain wrong characters')
temp = len(alignment)
    def check_alphabet(self, string):
        # a sequence is valid iff every character belongs to the MSA alphabet
        counter = 0
        for x in ALPHABET:
            counter += string.count(x)
        return counter == len(string)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# Calculate sequence weights
# Count (with weights) observed amino acids and gaps Redistribute gaps according to background frequencies
# Add weighted pseudocounts
# Normalize to relative frequencies
# Divide by background frequencies
# Calculate Log-Score
# Remove rows corresponding to gaps in the primary sequence (here the primary sequence is the first one in the MSA)
x_axis = len(self.sequences[0]) # length of the primary sequence
y_axis = len(self.sequences) # number of alignments
alphabet_without_gaps = len(ALPHABET)-1 # all amino acids without gap
        alphabet = len(ALPHABET) # all symbols, including the gap
pssm = np.zeros((x_axis,len(ALPHABET))) # all amino acids with gap (row -> position in sequence, column -> amino acid/gap)
        # Calculate sequence weights (the weighted counting itself happens in
        # the counting loop below, so only precompute the weights here)
        if use_sequence_weights:
            sequence_weights = self.get_sequence_weights()
# Count (with weights) observed amino acids
rows = np.zeros(x_axis).astype(np.int64) # counter for the sum in rows
counter_aa = 0
for letter in range(0,x_axis): # loop through the number of letters
for amino_acid in ALPHABET: # loop through all the amino acids
for alignment in range(0, y_axis):
if self.sequences[alignment][letter] == amino_acid and self.sequences[alignment][letter] != '-':
if use_sequence_weights:
pssm[letter, AA_TO_INT[amino_acid]] += sequence_weights[alignment]
else:
pssm[letter, AA_TO_INT[amino_acid]] += 1
counter_aa += 1 #increase the total number of occurences in a row
rows[letter] = counter_aa
counter_aa = 0
# Redistribute gaps according to background frequencies
if(redistribute_gaps):
rows_gaps = np.zeros(x_axis)
counter_gaps = 0
for letter in range(0,x_axis):
for alignment in range(0, y_axis):
if self.sequences[alignment][letter] == '-':
counter_gaps += 1
rows_gaps[letter] = counter_gaps # divide by background frequency
counter_gaps = 0
for letter in range(0,x_axis):
for amino_acid in range(0,alphabet_without_gaps):
pssm[letter,amino_acid] += rows_gaps[letter]*0.05
""" import sys
np.set_printoptions(threshold=sys.maxsize)
print(pssm) """
# Add weighted pseudocounts
if add_pseudocounts:
pass
# Normalize to relative frequencies
# Divide by background frequencies
for x in range(0, x_axis): # loop through the number of letters
for y in range(0, alphabet_without_gaps): # loop through all the amino acids
pssm[x,y] /= rows[x]
                if bg_matrix is not None:
pssm[x,y] /= np.sum(bg_matrix[y])
else:
pssm[x,y] /= 0.05
# Calculate Log-Score
for x in range(0, x_axis):
for y in range(0, alphabet):
if pssm[x,y] > 0:
pssm[x,y] = 2*log(pssm[x,y],2)
else:
pssm[x,y] = -20
for x in range(x_axis-1,-1,-1):
if self.sequences[0][x] == '-':
pssm = np.delete(pssm, x, 0)
pssm = np.delete(pssm, GAP_INDEX, 1)
pssm = np.rint(pssm).astype(np.int64)
return pssm
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
string = []
for x in self.sequences[0]:
if x != '-':
string.append(x)
return ''.join(string)
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
x_axis = len(self.sequences[0])
y_axis = len(self.sequences)
pssm_weighted = np.zeros((x_axis, y_axis)) #create array for the weights [letters * words]
pssm_letters = np.chararray((x_axis, y_axis), unicode=True) #create array of letters [letters * words]
for x in range(0, x_axis):
for y in range(0, y_axis):
pssm_letters[x,y] = self.sequences[y][x] # fill array with all the letters
pssm_r = np.zeros(x_axis) # array for the number of different letters
pssm_s = np.zeros((x_axis, y_axis)) # array with the number of occurences of the different letters
observed = []
listOfLetters = []
for x in range(0, x_axis): # loop through number of letters
for y in range (0, y_axis): # loop through number of alignments
observed.append(pssm_letters[x,y]) # a string of only the first letters
if not self.checkLetterInString(listOfLetters, pssm_letters[x,y]): # test if the list of letters has a specific letter
listOfLetters.append(pssm_letters[x,y]) # if the letter hasn't been added yet, add it now
pssm_r[x] = len(listOfLetters) #number of different letters at this position
for y in range (0, y_axis): # loop through the number of sequences
pssm_s[x,y] = observed.count(pssm_letters[x,y])
listOfLetters = []
observed = []
for x in range(0, x_axis):
for y in range (0, y_axis):
pssm_weighted[x,y] = 1/(pssm_s[x,y]*pssm_r[x])
sequence_weights = []
weight = 0
for y in range(0, y_axis): # loop through the number of alignments
for x in range(0, x_axis): # loop through the number of letters
if pssm_r[x] != 1:
weight += pssm_weighted[x,y] # calculate sequence weights
sequence_weights.append(weight)
weight = 0
        return np.array(sequence_weights, dtype=np.float64)
def checkLetterInString(self, string, letter):
for x in string: # loop through the string
if x == letter: # return if letter is in the string
return True
return False
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
x_axis = len(self.sequences[0])
        rows = [[]]
        counter = 0
        for x_index in range(0, x_axis):  # loop through the columns
            # count the distinct symbols observed in this column
            for alignment in self.sequences:
                if alignment[x_index] not in rows[x_index]:
                    rows[x_index].append(alignment[x_index])
                    counter += 1
            rows.append([])
num_obs = float(counter)/x_axis
return np.float64(num_obs)
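# Worked mini-example of the weight formula w_k = sum_i 1/(r_i * s_ik)
# (columns with r_i == 1 are skipped): for the MSA used in main() below,
#   SE-AN
#   SEV-N
# only columns 3 and 4 contribute (r_i = 2, s_ik = 1), so each sequence
# gets a weight of 1/2 + 1/2 = 1.0.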
def main():
    print('MSA demo')
sequences = [
"SE-AN",
"SEV-N"
]
msa = MSA(sequences)
print(msa.get_pssm())
if __name__ == "__main__":
main()
<file_sep>##############
# Exercise 2.7
##############
positive_charged = {"R", "K", "H"}
negative_charged = {"D", "E"}
hydrophobic = {"A", "I", "L", "M", "F", "W", "Y", "V"}
aromatic = {"F", "W", "Y", "H"}
polar = {"S", "T", "N", "Q", "H", "R", "D", "E", "K", "Y"}
unique = {"G", "P", "C"}
proline = {"P"}
sulfur = {"C", "M"}
acid = {"D", "E"}
basic = {"R", "H", "K"}

def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)

def isPositivelyCharged(aa):
    return aa in positive_charged

def isNegativelyCharged(aa):
    return aa in negative_charged

def isHydrophobic(aa):
    return aa in hydrophobic

def isAromatic(aa):
    return aa in aromatic

def isPolar(aa):
    return aa in polar

def isProline(aa):
    return aa in proline

def containsSulfur(aa):
    return aa in sulfur

def isAcid(aa):
    return aa in acid

def isBasic(aa):
    return aa in basic
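# Quick sanity checks (hedged examples derived from the sets above):
if __name__ == '__main__':
    assert isPositivelyCharged('K') and not isPositivelyCharged('D')
    assert isAromatic('W') and isPolar('S') and containsSulfur('M')
    assert isProline('P') and isAcid('E') and isBasic('H')
    print('all amino acid property checks passed')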
<file_sep>##############
# Exercise 2.6
##############
# Version using plain functions
def read_fasta(filename):
    """
    Input : fasta file name
    Output : list of (header, sequence) tuples, one per entry in the file
    Method : open and read out the sequences given in FASTA format
    """
    f = open(filename, 'r')
    num_lines = sum(1 for line in open(filename))
    H = []
    B = []
    pairs = []  # List of tuples (header, sequence)
    i_line = 0
    seq = ''
    for line in f:
        if line.startswith('>') or line.startswith(';'):  # header detected
            if seq != '':
                B.append(seq)
                seq = ''
            header = line.strip()  # note: strip() may remove meaningful whitespace
            H.append(header)
        else:
            seq += line.strip()  # note: strip() may remove meaningful whitespace
            seq = seq.replace('*', '')
            if i_line == num_lines - 1:
                B.append(seq)  # the last sequence has no following header,
                               # so flush it on the final line
        i_line += 1
    for i in range(len(H)):
        pairs.append((H[i], B[i]))
    f.close()
    return pairs
def get_counts(l):
"""
Goal : count the number of read sequences and returns the number as integer
Input : results from read_fasta, list of tuples
Output : integer
"""
return len(l)
def get_average_length(l):
"""
Goal : calculate the average sequence length
Input : list of tuples (header, sequence)
Output : average length of a sequence, float
"""
c=0 # initialise counter
for (h,seq) in l:
c+=len(seq)
return c/len(l)
def get_abs_frequencies(l):
"""
Goal : count the occurrence for every amino acid over all proteins
Input : list of tuples (header, sequence)
Output : dictionary key=aa one letter code, value = total number of occurences in list
"""
dic={'A':0, 'R':0, 'D':0, 'N':0, 'C':0, 'E':0, 'Q':0, 'G':0, 'H':0, 'I':0, 'L':0, 'K':0, 'M':0, 'F':0, 'P':0, 'S':0, 'T':0, 'W':0, 'Y':0, 'V':0 }
for (h,seq) in l :
for c in seq:
dic[c]+=1
return dic
# Note: "absolute frequencies" are raw occurrence counts, not normalized by sequence length.
def get_av_frequencies(l):
"""
Goal : calculate the average amino acid composition over all read protein sequences
Input : name of the file to process
Output : dictionary where key = aa, value = average frequency
"""
# l=read_fasta(filename) # get the (header, sequence) list
dic=get_abs_frequencies(l) # get number of occurences in total, for all sequences
c=0 # initialise counter
for (h,seq) in l:
c+=len(seq)
avg_dic={'A':0, 'R':0, 'D':0, 'N':0, 'C':0, 'E':0, 'Q':0, 'G':0, 'H':0, 'I':0, 'L':0, 'K':0, 'M':0, 'F':0, 'P':0, 'S':0, 'T':0, 'W':0, 'Y':0, 'V':0 } # initialize the average composition
for aa in dic:
avg_dic[aa]=dic[aa]/c
return avg_dic
# Class-based version
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def read_fasta(self, path):
self.liste=read_fasta(path)
return self.liste
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.liste)
def get_average_length(self):
return get_average_length(self.liste)
def get_abs_frequencies(self):
# return number of occurences not normalized by length
return get_abs_frequencies(self.liste)
def get_av_frequencies(self):
# return number of occurences normalized by length
return get_av_frequencies(self.liste)
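# Hedged usage sketch (assumes some FASTA file, e.g. 'proteins.fasta', exists):
#
#   dist = AADist('proteins.fasta')
#   print(dist.get_counts())            # number of sequences
#   print(dist.get_average_length())    # mean sequence length
#   print(dist.get_av_frequencies())    # per-amino-acid relative frequencies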
<file_sep>import re
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = "ACDEFGHIKLMNPQRSTVWY-"
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT["-"]
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError
for s in sequences:
if len(s) == 0 or len(s) != len(sequences[0]):
raise TypeError
if re.match(r"^[ACDEFGHIKLMNPQRSTVWY-]+$", s) is None:
raise TypeError
self.seq = sequences
self.seq_T = []
self.r_i = []
for idx in range(len(self.seq[0])):
col = "".join([s[idx] for s in self.seq])
r_idx = len(set(col))
self.seq_T.append(col)
self.r_i.append(r_idx)
def get_pssm(
self,
*,
bg_matrix=None,
beta=10,
use_sequence_weights=False,
redistribute_gaps=False,
add_pseudocounts=False
):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
alpha = self.get_number_of_observations() - 1
if use_sequence_weights:
weights = self.get_sequence_weights()
bg_freq = 0.05
pssm = np.zeros((len(self.get_primary_sequence()), 20))
idx = 0
for _, col in enumerate(self.seq_T):
if col[0] == "-":
continue
temp_pssm = np.zeros(21)
for char in set(col):
if use_sequence_weights:
# col indexes for the character in set
c_idxs = [i for i, c in enumerate(col) if c == char]
temp_pssm[AA_TO_INT[char]] = sum(weights[c_idxs])
else:
temp_pssm[AA_TO_INT[char]] = col.count(char)
if redistribute_gaps:
                if bg_matrix is not None:
for a in range(20):
temp_pssm[a] += temp_pssm[-1] * sum(bg_matrix[a])
else:
temp_pssm += temp_pssm[-1] * bg_freq
pssm[idx] = temp_pssm[0:-1]
# pseudo
if add_pseudocounts:
tmp2_pssm = np.zeros(20)
for a in range(20):
total = 0
for j in range(20):
                        if bg_matrix is not None:
P_j = sum(bg_matrix[j])
q_j_a = bg_matrix[j][a]
else:
P_j = 0.05
q_j_a = 0.0025
f_i_j = pssm[idx][j]
total += (f_i_j / P_j) * q_j_a
tmp2_pssm[a] = (alpha * pssm[idx][a] + beta * total) / (
alpha + beta
)
pssm[idx] = tmp2_pssm
# Normalize
row_sum = sum(pssm[idx])
pssm[idx] /= row_sum
# bg
            if bg_matrix is not None:
for a in range(20):
pssm[idx][a] /= sum(bg_matrix[a])
else:
pssm[idx] /= bg_freq
pssm[idx] = np.log2(pssm[idx]) * 2
pssm[idx][pssm[idx] == -np.inf] = -20
idx += 1
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.seq), len(self.seq[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.seq[0].replace("-", "")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(len(self.seq))
for idx, col in enumerate(self.seq_T):
if self.r_i[idx] == 1:
continue
for k in range(len(self.seq)):
s_ik = col.count(self.seq[k][idx])
weights[k] += 1 / (self.r_i[idx] * s_ik)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = sum(self.r_i) / len(self.seq[0])
return np.float64(num_obs)
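# Note on the pseudocount step in get_pssm above: the code implements
#   adjusted_f[i][a] = (alpha * f[i][a] + beta * g[i][a]) / (alpha + beta)
# with g[i][a] = sum_j (f[i][j] / P[j]) * q[j][a], alpha = N_obs - 1 and
# beta the method parameter; P and q fall back to the uniform values
# 0.05 and 0.0025 when no bg_matrix is given.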
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def appendUnique(self, firstList, secondList):
resultingList = firstList
for i in secondList:
if i not in resultingList:
resultingList.append(i)
return resultingList
# search from One path
def isFinalScorePath(self, scorePath):
#print("scorePath:", scorePath)
# if scorepath is empty
if len(scorePath) == 0:
return False
# take the last cell coordinates of the path
lastCell = scorePath[-1]
#print("last cell of path:", lastCell)
i = lastCell[0]
j = lastCell[1]
        # test if we have reached the top-left cell;
        # if so, we have found a complete score path
if i == 0 and j == 0:
return True
else:
return False
# search from One path
def getFinalScorePaths(self, scorePath):
finalScorePaths = []
pathStack = []
#print("getFinalScorePaths(", scorePath, ")")
# if scorepath is empty
if len(scorePath) == 0:
return [[[]]]
# Init the exploration stack with the starting cell
pathStack = scorePath.copy()
#print("Processing ", pathStack)
if self.isFinalScorePath(pathStack) == True:
#print("final path found:", pathStack)
#path ends with (0,0)
newPaths = [pathStack.copy()]
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
#print("finalScorePaths", finalScorePaths)
else:
# otherwise not a viable path
            # try to explore the 3 neighbors
# cell other than (0,0)
# take the last cell coordinates of the path
startingCell = scorePath[-1]
i = startingCell[0]
j = startingCell[1]
            # vertical step (move up: consume a letter of string2, gap in string1)
            if i > 0 and self.score_matrix[i-1][j]+self.gap_penalty==self.score_matrix[i][j]:
nextCell = [i-1, j]
pathStack.append(nextCell)
newPaths = self.getFinalScorePaths(pathStack)
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
pathStack.pop()
# diagonal
current_score=self.score_matrix[i][j]
diag_score=self.score_matrix[i-1][j-1]
if current_score==diag_score+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
precedent=True
else:
precedent=False
if i > 0 and j > 0 and precedent:
#print("")
#print("diagonal")
nextCell = [i-1, j-1]
pathStack.append(nextCell)
newPaths = self.getFinalScorePaths(pathStack)
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
pathStack.pop()
            # horizontal step (move left: consume a letter of string1, gap in string2)
            if j > 0 and self.score_matrix[i][j-1]+self.gap_penalty==self.score_matrix[i][j]:
nextCell = [i, j-1]
pathStack.append(nextCell)
newPaths = self.getFinalScorePaths(pathStack)
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
pathStack.pop()
return finalScorePaths
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
        ##### Score matrix #####
        S = np.zeros((len(self.string2) + 1, len(self.string1) + 1), dtype=int)  # score matrix
        # to be constructed step by step and then score_matrix <- S
        for i in range(len(self.string2) + 1):
            S[i, 0] = i * self.gap_penalty
        for j in range(len(self.string1) + 1):
            S[0, j] = j * self.gap_penalty
        # A single row-major pass suffices: every cell only depends on its
        # top, left and top-left neighbours, which are already filled.
        for i in range(1, len(self.string2) + 1):
            for j in range(1, len(self.string1) + 1):
                S[i, j] = max(S[i-1][j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]],
                              S[i-1][j] + self.gap_penalty,
                              S[i][j-1] + self.gap_penalty)
        self.score_matrix = S
##### Finding Valid Score Paths #####
scorePath = [[len(self.string2), len(self.string1)]]
finalScorePaths = self.getFinalScorePaths(scorePath)
        alignments=[]
        # Conversion of each path into a pair of aligned strings
for path in finalScorePaths:
seq1=''
seq2=''
path.reverse()
for k in range(len(path)):
                # note: cell (i, j) of the score matrix corresponds to
                # letter i of string2 and letter j of string1 (1-based)
[i,j]=path[k] # k-th step
if k>0:
# diagonal step
if path[k-1][0]==i-1 and path[k-1][1]==j-1:
letter2=self.string2[i-1]
letter1=self.string1[j-1]
seq2+=letter2
seq1+=letter1
# horizontal step
if path[k-1][0]==i and path[k-1][1]==j-1:
# add gap in string2
letter2='-'
letter1=self.string1[j-1]
seq2+=letter2
seq1+=letter1
# vertical step
if path[k-1][0]==i-1 and path[k-1][1]==j:
# add gap in string1
letter2=self.string2[i-1]
letter1='-'
seq2+=letter2
seq1+=letter1
alignments.append((seq1, seq2))
#print("And the alignments are : ")
print(alignments)
return alignments
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.align())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.align()
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
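# Minimal usage sketch (toy substitution dict with made-up values --
# not the exercise's real matrix):
if __name__ == '__main__':
    toy = {a: {b: (1 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    ga = GlobalAlignment('ACT', 'AGT', -2, toy)
    print(ga.get_best_score())   # best global alignment score
    print(ga.get_alignments())   # e.g. [('ACT', 'AGT')]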
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser(QUIET=True) # parser object for reading in structure in CIF format
codon_dict = {
'ALA': 'A',
'ARG': 'R',
'ASN': 'N',
'ASP': 'D',
'CYS': 'C',
'GLN': 'Q',
'GLU': 'E',
'GLY': 'G',
'HIS': 'H',
'ILE': 'I',
'LEU': 'L',
'LYS': 'K',
'MET': 'M',
'PHE': 'F',
'PRO': 'P',
'SER': 'S',
'THR': 'T',
'TRP': 'W',
'TYR': 'Y',
'VAL': 'V'
}
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
# Parse the structure once and re-use it in the functions below
self.structure = self.CIF_PARSER.get_structure("CIF", path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
chains = list(self.structure.get_chains())
#print(chains)
n_chains = len(chains)
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
        for chain in self.structure.get_chains():
            if chain.get_id() == chain_id:
                # keep only the standard amino acids and map them to one-letter codes
                sequence = ''.join([self.codon_dict[r.resname] for r in chain.get_residues() if r.resname in self.codon_dict.keys()])
                return sequence
        print("Failed to find chain!")
        return None
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
        for chain in self.structure.get_chains():
            if chain.get_id() == chain_id:
                n_waters = len([r.resname for r in chain.get_residues() if r.resname == "HOH"])
                return n_waters
        print("Failed to find chain!")
        return None
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
        '''
        chain_1 = None
        chain_2 = None
        for chain in self.structure.get_chains():
            if chain.id == chain_id_1:
                chain_1 = chain
            if chain.id == chain_id_2:
                chain_2 = chain
        if chain_1 is None or chain_2 is None:
            print("Failed to find one or more chains in get_ca_distance!")
            return
aa_1 = chain_1[index_1]
aa_2 = chain_2[index_2]
# https://bioinformatics.stackexchange.com/questions/783/how-can-we-find-the-distance-between-all-residues-in-a-pdb-file
try:
ca_distance = aa_1['CA'] - aa_2['CA']
except KeyError:
## no CA atom, e.g. for H_NAG
ca_distance = -1
return int( ca_distance )
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
chain = None
for it_chain in self.structure.get_chains():
if it_chain.id == chain_id:
chain = it_chain
if chain is None:
print("Failed to find on or more chains in get_ca_distance!")
return
sequence = [r for r in chain if r.resname in self.codon_dict.keys()]
length = len(sequence)
contact_map = np.zeros( (length,length), dtype=np.int64 )
for idx_1, aa_1 in enumerate(sequence):
for idx_2, aa_2 in enumerate(sequence):
contact_map[idx_1][idx_2] = int(aa_1['CA'] - aa_2['CA'])
return contact_map # .astype( np.int ) # return rounded (integer) values
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
chain = None
for it_chain in self.structure.get_chains():
if it_chain.id == chain_id:
chain = it_chain
if chain is None:
print("Failed to find on or more chains in get_ca_distance!")
return
sequence = [r for r in chain if r.resname in self.codon_dict.keys()]
length = len(sequence)
b_factors = np.zeros( length )
for idx, aa in enumerate(sequence):
try:
atoms_bfactor = [a.get_bfactor() for a in aa]
b_factors[idx] = np.mean(atoms_bfactor)
except KeyError:
print("Failed to resolve bfacors")
b_factors[idx] = np.nan
        normed = np.rint((b_factors - np.nanmean(b_factors)) / np.nanstd(b_factors)).astype(np.int64)
        return normed  # return rounded (integer) values
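# Note on get_bfactors: the normalization is the standard score
# z = (b - mean) / std, computed with np.nanmean / np.nanstd so that
# unresolved residues (stored as np.nan) do not poison the statistics.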
def main():
print('PDB parser class.')
parser = PDB_Parser("./tests/7ahl.cif")
print(parser.get_bfactors("A"))
return None
if __name__ == '__main__':
main()<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.index1 = []
self.index2 = []
self.max = 0
self.pos = None
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix[0, :] = np.zeros(len(self.string1)+1)
self.score_matrix[:, 0] = np.zeros(len(self.string2)+1)
for i in range(1, len(self.string2) + 1):
for k in range(1, len(self.string1) + 1):
match = self.score_matrix[i-1, k-1] + self.substitution_matrix[self.string2[i-1]][self.string1[k-1]]
delete = self.score_matrix[i-1, k] + self.gap_penalty
insert = self.score_matrix[i, k-1] + self.gap_penalty
self.score_matrix[i, k] = max(0, match, delete, insert)
if self.score_matrix[i, k] > self.max:
self.max = self.score_matrix[i, k]
self.pos = (i,k)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
alignment_b, alignment_a = self.get_alignment()
return not (alignment_b == '' and alignment_a == '')
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
i, k = np.unravel_index(self.score_matrix.argmax(), self.score_matrix.shape)
alignment_a = ''
alignment_b = ''
while i > 0 or k > 0:
if i > 0 and k > 0 and self.score_matrix[i, k] == self.score_matrix[i-1, k-1] + \
self.substitution_matrix[self.string2[i - 1]][self.string1[k-1]]:
alignment_a = self.string2[i-1] + alignment_a
alignment_b = self.string1[k-1] + alignment_b
self.index2.append(i-1)
self.index1.append(k-1)
i -= 1
k -= 1
elif i > 0 and self.score_matrix[i, k] == self.score_matrix[i-1, k] + self.gap_penalty:
alignment_a = self.string2[i-1] + alignment_a
alignment_b = '-' + alignment_b
i -= 1
else:
alignment_a = '-' + alignment_a
alignment_b = self.string1[k-1] + alignment_b
k -= 1
        # strip leading gap columns: a local alignment must begin with an
        # aligned residue pair
        while alignment_b and alignment_b[0] == '-':
            alignment_a = alignment_a[1:]
            alignment_b = alignment_b[1:]
        while alignment_a and alignment_a[0] == '-':
            alignment_a = alignment_a[1:]
            alignment_b = alignment_b[1:]
return (alignment_b, alignment_a)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
index = self.index1 if string_number == 1 else self.index2
return residue_index in index
<file_sep>##############
# Exercise 2.6
##############
import collections, functools, operator
import os
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.fasta = {}
self.fasta = self.read_fasta(filepath)
def aa_dist_lengthy(self, aa_seq):
counted = collections.Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def aa_dist(self, aa_seq):
counted = collections.Counter(aa_seq)
return counted
def get_counts(self):
return len(self.fasta)
def get_average_length(self):
fasta = self.fasta
count=0
sum = 0
for key in fasta.values():
count += 1
sum += len(key)
avg = float(sum/count)
return avg
def read_fasta(self, path):
fastaFile = open(path, "r")
prots = {}
begin = True
for line in fastaFile:
slin= line.strip()
if slin.startswith(">"):
if begin == False:
prots[header] = seq
seq = ""
header = slin[1:].strip()
begin = False
else:
seq = seq + slin
if seq.endswith('*'):
seq = seq[:-1]
prots[header] = seq
return(prots)
def get_abs_frequencies(self):
freqs = {}
# return number of occurences not normalized by length
for keys, values in self.fasta.items():
freqs[keys] = self.aa_dist(values)
result = dict(functools.reduce(operator.add,
map(collections.Counter, freqs.values())))
return result
def get_av_frequencies(self):
        # return number of occurrences normalized by total length
        freq = collections.Counter()
        N = 0
        for sequence in self.fasta.values():
            freq += collections.Counter(sequence)
            N += len(sequence)
        for c in freq.keys():
            freq[c] /= N
        return freq
def setup():
    relative_path = os.path.dirname(__file__)
    dist = AADist(relative_path + '/tests.fasta')
    return dist

if __name__ == '__main__':
    f = setup()
    print(f.get_av_frequencies())
<file_sep>import numpy as np
class Direction:
Left = 0b100
    Diagonal = 0b010
Up = 0b001
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros(
(len(string2) + 1, len(string1) + 1), dtype=np.int)
# The trace matrix encodes for each field (i, j) in the score matrix how
# we reached this field. We encode this information in a single integer as
# follows: We use a 1-hot-encoding where the first position marks the field
# 'left', the second marks 'diagonal' and the third one marks 'up', e.g.,
#
# trace_matrix[i][j] = 101 <=> we reached (i, j) from (i-1, j) or from
# (i, j-1)
self.trace_matrix = np.zeros(
(len(string2) + 1, len(string1) + 1), dtype=np.int32)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(len(self.string2) + 1):
self.score_matrix[i][0] = 0
self.trace_matrix[i][0] = 0b0
for j in range(len(self.string1) + 1):
self.score_matrix[0][j] = 0
self.trace_matrix[0][j] = 0b0
for i in range(1, len(self.string2) + 1):
for j in range(1, len(self.string1) + 1):
diag_score = self.score_matrix[i - 1][j - 1] + \
self.substitution_matrix[self.string2[i - 1]][self.string1[j-1]]
up_score = self.score_matrix[i - 1][j] + self.gap_penalty
left_score = self.score_matrix[i][j - 1] + self.gap_penalty
zero_score = 0
self.score_matrix[i][j] = max(
diag_score, up_score, left_score, zero_score)
                # Trace the calculation; several directions can tie, so set
                # one bit per direction that achieves the cell's score
                # (a zero-scoring cell keeps trace == 0b0, which terminates
                # the traceback)
                trace = 0
                if self.score_matrix[i][j] > 0:
                    if self.score_matrix[i][j] == diag_score:
                        trace |= Direction.Diagonal
                    if self.score_matrix[i][j] == up_score:
                        trace |= Direction.Up
                    if self.score_matrix[i][j] == left_score:
                        trace |= Direction.Left
                self.trace_matrix[i][j] = trace
def get_aligning_path(self):
path = []
max_i, max_j = np.unravel_index(
self.score_matrix.argmax(), self.score_matrix.shape)
path.append((max_i, max_j))
i = max_i
j = max_j
while True:
            if self.trace_matrix[i][j] & Direction.Diagonal == Direction.Diagonal:
path.append((i - 1, j - 1))
i -= 1
j -= 1
elif self.trace_matrix[i][j] & Direction.Up == Direction.Up:
path.append((i - 1, j))
i -= 1
elif self.trace_matrix[i][j] & Direction.Left == Direction.Left:
path.append((i, j-1))
j -= 1
elif self.trace_matrix[i][j] & 0b0 == 0b0:
break
return path
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
if (self.get_aligning_path() == [(0, 0)]):
return False
return True
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if not self.has_alignment():
return ('', '')
alignments = []
path = self.get_aligning_path()[:-1][::-1]
alignment = ["", ""]
old_i = 0
old_j = 0
for pair in path:
i = pair[0]
j = pair[1]
if old_i != i and old_j != j:
alignment[0] += self.string1[j-1]
alignment[1] += self.string2[i-1]
elif old_j == j:
alignment[0] += "-"
alignment[1] += self.string2[i-1]
else:
alignment[0] += self.string1[j-1]
alignment[1] += "-"
old_i = i
old_j = j
return (alignment[0], alignment[1])
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
path = self.get_aligning_path()
if (path == [(0, 0)]):
return False
if string_number == 1:
for pair in path:
if pair[1] - 1 == residue_index:
return True
if string_number == 2:
for pair in path:
if pair[0] - 1 == residue_index:
return True
return False
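# Sketch of the trace encoding used above: Direction values are one-hot
# bits, so combinations can be stored and tested with bitwise operators.
#
#   trace = Direction.Left | Direction.Diagonal   # 0b110
#   assert trace & Direction.Left
#   assert trace & Direction.Diagonal
#   assert not (trace & Direction.Up)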
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.__filepath = filepath
self.read_fasta(filepath)
def aa_dist(self, aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def read_fasta(self, filename):
sequence_ids = ""
sequence = ""
with open(filename) as file_opened:
for line in file_opened:
                line = line.strip('\n\r')
if not line or line.startswith(">") or line.startswith(";"):
if sequence_ids:
if sequence.endswith("*"):
sequence = sequence[:-1]
self.__sequences.append(sequence.replace(' ', ''))
sequence_ids = line[1:]
sequence = ''
continue
sequence = sequence + line
if sequence:
self.__sequences.append(sequence.replace(' ',''))
def get_counts(self):
return len(self.__sequences)
def get_abs_frequencies(self):
result = []
for sequence in self.__sequences:
for char in sequence:
result.append(char)
return dict((i, result.count(i)) for i in set(result))
def get_average_length(self):
length = 0.0
for sequence in self.__sequences:
length += len(sequence.strip())
return length/self.get_counts()
def get_av_frequencies(self):
result = []
for sequence in self.__sequences:
for char in sequence:
result.append(char)
count = dict((i, result.count(i)) for i in set(result))
total_sum = 0
frequencies = {}
for i, j in count.items():
total_sum += j
for i, j in count.items():
j = j / total_sum
frequencies.setdefault(i)
frequencies[i] = j
return frequencies
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
import os
# Get relative path to script
relative_path = os.path.dirname(__file__)
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        # instance-level state: class-level mutable attributes would be
        # shared between all AADist instances
        self.proteins = {
            'A': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0,
            'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'V': 0, 'W': 0, 'Y': 0
        }
        self.totalLength = 0
        self.__sequences = []
        self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return self.totalLength / float(len(self.__sequences))
def read_fasta(self, path):
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.totalLength = self.totalLength + len(seq)
self.__sequences.append(seq)
self.count_proteins(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
if seq.endswith('*'):
seq = seq[:-1]
self.totalLength = self.totalLength + len(seq)
self.__sequences.append(seq)
self.count_proteins(seq)
def get_abs_frequencies(self):
return self.proteins
    def get_av_frequencies(self):
        # compute into a fresh dict so repeated calls don't renormalize
        # the stored counts in place
        total = float(sum(self.proteins.values()))
        return {aa: count / total for aa, count in self.proteins.items()}
    def count_proteins(self, seq):
        for aa in seq:
            if aa in self.proteins:
                self.proteins[aa] += 1
def aa_dist(self, aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.predecessor_matrix = np.zeros_like(self.score_matrix) # contains decimal representations of 3 digit binary codes. From leftmost to rightmost bit: left predecessor, top predecessor, and diagonal predecessor. E.g. 100 means current cell has only the cell to its left as predecessor.
self.backtraces = []
self.alignments = []
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(1, len(self.score_matrix)):
for j in range(1, len(self.score_matrix[0])):
self.predecessor_matrix[i][j], self.score_matrix[i][j] = self.choose_predecessor(i, j)
max_val = -1
max_row = max_col = -1
for i, row in enumerate(self.score_matrix):
for j, value in enumerate(self.score_matrix[i]):
if self.score_matrix[i][j] > max_val:
max_val = self.score_matrix[i][j]
max_row = i
max_col = j
self.backtrace(max_row, max_col, [], self.backtraces)
self.alignments = self.trace_to_alignment(self.backtraces) # transform backtraces to alignments
def trace_to_alignment(self, backtraces):
alignments = []
for backtrace in backtraces:
alignment = [[], []]
for predecessor in backtrace:
if predecessor[1] == '100':
alignment[0].append(self.string1[predecessor[0][1]])
alignment[1].append('-')
elif predecessor[1] == '010':
alignment[0].append('-')
alignment[1].append(self.string2[predecessor[0][0]])
elif predecessor[1] == '001':
alignment[0].append(self.string1[predecessor[0][1]])
alignment[1].append(self.string2[predecessor[0][0]])
alignments.append(("".join(alignment[0][::-1]), "".join(alignment[1][::-1])))
return alignments
def backtrace(self, row, col, pred_track, backtraces):
pred_track_local = pred_track[:] # Copy by value, otherwise pred_track doesn't get "reset" at branching cell as values will keep being appended to the very same list every time.
if self.score_matrix[row][col] == 0:
backtraces.append(pred_track)
return
for i, p in enumerate(list(format(self.predecessor_matrix[row][col], '03b'))):
if int(p):
if i == 0:
pred_track = pred_track_local[:]
pred_track.append(((row, col-1), '100'))
self.backtrace(row, col-1, pred_track, backtraces)
elif i == 1:
pred_track = pred_track_local[:]
pred_track.append(((row-1, col), '010'))
self.backtrace(row-1, col, pred_track, backtraces)
elif i == 2:
pred_track = pred_track_local[:]
pred_track.append(((row-1, col-1), '001'))
self.backtrace(row-1, col-1, pred_track, backtraces)
def choose_predecessor(self, row, col):
pred_code = ['0', '0', '0']
        scores = [
            self.score_matrix[row][col-1] + self.gap_penalty,  # left
            self.score_matrix[row-1][col] + self.gap_penalty,  # top
            self.score_matrix[row-1][col-1]                    # diagonal
            + self.substitution_matrix[self.string2[row-1]][self.string1[col-1]]
        ]
maximum_score = max(scores)
if maximum_score >= 0:
for i, s in enumerate(scores):
if s == maximum_score:
pred_code[i] = '1'
else:
maximum_score = 0
return (int("".join(pred_code), 2), maximum_score)
    def has_alignment(self):
        """
        :return: True if a local alignment has been found, False otherwise
        """
        return bool(self.backtraces[0])  # an empty backtrace means no alignment
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
        return self.alignments[0]  # will always contain at least one element
def is_residue_aligned(self, string_number, residue_index):
if string_number == 1:
alignment_no_dash = self.alignments[0][0].replace('-', '')
if alignment_no_dash in self.string1:
start = self.string1.index(alignment_no_dash)
end = start + len(self.alignments[0][0])
padded_alignment = 'x'*start + self.alignments[0][0] + 'x'*(len(self.string1) - end)
padded_opposing_alignment = 'x'*start + self.alignments[0][1] + 'x'*(len(self.string1) - end)
if self.string1[residue_index] == padded_alignment[residue_index]:
# if self.string1[residue_index] == padded_opposing_alignment[residue_index]:
return True
elif string_number == 2:
alignment_no_dash = self.alignments[0][1].replace('-', '')
if alignment_no_dash in self.string2:
start = self.string2.index(alignment_no_dash)
end = start + len(self.alignments[0][1])
padded_alignment = 'x'*start + self.alignments[0][1] + 'x'*(len(self.string2) - end)
padded_opposing_alignment = 'x'*start + self.alignments[0][0] + 'x'*(len(self.string2) - end)
if self.string2[residue_index] == padded_alignment[residue_index]:
# if self.string2[residue_index] == padded_opposing_alignment[residue_index]:
return True
return False
def is_residue_aligned2(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned,
False otherwise
"""
if string_number == 1:
# Check if alignment fits on top of original string
for i in range(len(self.string1)):
if self.string1[i:len(self.alignments[0][0].replace('-', ''))+i] == self.alignments[0][0].replace('-', ''):
if residue_index in range(i,len(self.string1[i:len(self.alignments[0][0])+i])):
if self.string1[residue_index] == self.alignments[0][0][residue_index-i]:
# if self.string1[residue_index] == self.alignments[0][1][residue_index-i]:
return True
"""if self.string1[residue_index] in self.alignments[0][0][residue_index:]:
index_in_alignment = self.alignments[0][0][residue_index:].index(self.string1[residue_index])
if self.alignments[0][1][index_in_alignment] == self.string1[residue_index]:
return True"""
else:
for i in range(len(self.string2)):
if self.string2[i:len(self.alignments[0][1].replace('-', ''))+i] == self.alignments[0][1].replace('-', ''):
if residue_index in range(i,len(self.string2[i:len(self.alignments[0][1])+i])):
if self.string2[residue_index] == self.alignments[0][1][residue_index-i]:
# if self.string2[residue_index] == self.alignments[0][0][residue_index-i]:
return True
"""if self.string2[residue_index] in self.alignments[0][1][residue_index:]:
index_in_alignment = self.alignments[0][1][residue_index:].index(self.string2[residue_index])
if self.alignments[0][0][index_in_alignment] == self.string2[residue_index]:
return True"""
return False
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
#print(sequences)
if len(sequences) > 0:
if all(len(x) == len(sequences[0]) for x in sequences):
print("Sequences of same length")
if all(len(set(y) - set(ALPHABET)) == 0 for y in sequences):
print("Correct characters")
self.msa = sequences
else:
raise TypeError("Sequences have wrong characters")
else:
raise TypeError("Sequences of different length")
else:
raise TypeError("No Sequences")
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
back_freq = 0.05
pssm = np.zeros((len(self.get_primary_sequence()), 20))
indices = [i for i,v in enumerate(self.msa[0]) if v != "-"]
#print(indices)
#setup array consisting of msa columns; only containing space of primary sequence
primSeqArray = []
for j in indices:
column = ""
for i, sequence in enumerate(self.msa):
column += sequence[j]
#(column)
#print(set(column))
#print("______________________")
primSeqArray.append(column)
#print(primSeqArray)
#print(len(primSeqArray))
"""
#setup array consisting of msa columns; only containing space of primary sequence
primSeqArray = []
for j in range(len(self.msa[0])):
column = ""
for i, sequence in enumerate(self.msa):
column += sequence[j]
#(column)
#print(set(column))
#print("______________________")
primSeqArray.append(column)
primSeqArray = primSeqArray[8:28] + primSeqArray[38:83] + primSeqArray[86:129]
print(primSeqArray)
print(len(primSeqArray))
"""
for j, sequence in enumerate(primSeqArray):
result = sorted([(x, sequence.count(x)) for x in set(sequence)], key=lambda y: y[1])
resultWithoutGaps = [i for i in result if i[0] != "-"]
#print(result)
#print(resultWithoutGaps)
#fill pssm with absolute / weighted counts
if use_sequence_weights:
seqWeights = self.get_sequence_weights()
#print(seqWeights)
#print(sequence)
#print(range(len(sequence)))
for element in resultWithoutGaps:
#print(element)
#print(element[0])
for i in range(len(sequence)):
if sequence[i] == element[0]:
pssm[j][AA_TO_INT[element[0]]] += seqWeights[i]
#[seqWeights[i] for i in range(len(sequence)) if sequence[i] == element]
#print(pssm[j])
else:
for element in resultWithoutGaps:
pssm[j][AA_TO_INT[element[0]]] = element[1]
#print(pssm[j])
#fill pssm with #"-" * back_freq
if redistribute_gaps:
for element in result:
if element[0] == "-":
#print("Tuple found")
for i in range(20):
                        if bg_matrix is not None:
pssm[j][i] += element[1] * sum(bg_matrix[i])
else:
pssm[j][i] += element[1] * back_freq
#print("Redistribute")
#print(pssm[j])
#for element in resultWithoutGaps:
# pssm[j][AA_TO_INT[element[0]]] = element[1]
if add_pseudocounts:
pseudocounts = np.zeros(20)
#print(pseudocounts)
for i in range(len(pseudocounts)):
for k in range(20):
                        if bg_matrix is not None:
pseudocounts[i] += (pssm[j][k] / sum(bg_matrix[k])) * bg_matrix[i][k]
else:
pseudocounts[i] += (pssm[j][k] / 0.05) * (0.05 / 20)
#print("Pseudo")
#print(pseudocounts)
#print("____________________")
Fi = np.zeros(20)
alpha = self.get_number_of_observations() - 1
for i in range(20):
Fi[i] = (alpha * pssm[j][i] + beta * pseudocounts[i]) / (alpha + beta)
pssm[j][i] = Fi[i]
#print("PSSM")
#print(pssm[j])
sumDistinctAAs = 0
if redistribute_gaps:
sumDistinctAAs = sum(n for _, n in result)
elif use_sequence_weights:
for i in range(len(pssm[j])):
sumDistinctAAs += pssm[j][i]
else:
sumDistinctAAs = sum(n for _, n in resultWithoutGaps)
#print(sumDistinctAAs)
for i in range(20):
                if pssm[j][i] > 0:  # counts are non-negative; zero counts are handled after the loop
                    if bg_matrix is not None:
back_freq = sum(bg_matrix[i])
if sumDistinctAAs != 0:
fij = pssm[j][i] / sumDistinctAAs
#print(fij)
pssm[j][i] = 2 * np.log2(fij / back_freq)
#"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
#print(bg_matrix[AA_TO_INT["A"]][AA_TO_INT["A"]])
print(np.rint(pssm).astype(np.int64))
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.msa), len(self.msa[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
#print(self.msa[0].replace("-", ""))
#print(len(self.msa[0].replace("-", "")))
return (self.msa[0].replace("-", ""))
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
array = []
for j in range(len(self.msa[0])):
column = ""
for i, sequence in enumerate(self.msa):
column += sequence[j]
ri = len(set(column))
if ri > 1:
columnArray = []
for k in range(len(column)):
wik = 1 / (ri * column.count(column[k]))
columnArray.append(wik)
array.append(columnArray)
#print(column)
#print(ri)
#print(columnArray)
#print("________________________")
#print(array)
weights = []
for l in range(len(array[0])):
sumweight = 0
for m in range(len(array)):
sumweight += array[m][l]
weights.append(sumweight)
#print(weights)
        return np.array(weights, dtype=np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
ri = 0
for j in range(len(self.msa[0])):
column = ""
for i, sequence in enumerate(self.msa):
column += sequence[j]
ri += len(set(column))
num_obs = (1/ len(self.msa[0])) * ri
return num_obs
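# Minimal usage sketch (not part of the exercise template; the sequences
# below are made up for illustration only):
if __name__ == '__main__':
    example = MSA(["SE-QNCE", "SEQQ-CE", "SE-QECE"])
    print(example.get_size())              # (3, 7)
    print(example.get_primary_sequence())  # 'SEQNCE'
    print(example.get_sequence_weights())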
<file_sep>import numpy as np
import copy
def traceback(i, j, string1,string2,substitution_matrix,gap_penalty,score_matrix,al1,al2):
#Attention! string1 is used to index columns, string2 is used to index rows
let2 = string2[i - 1]
let1 = string1[j - 1]
diag = score_matrix[i - 1][j - 1] + substitution_matrix[let2][let1]
ver = score_matrix[i - 1][j] + gap_penalty
hor = score_matrix[i][j - 1] + gap_penalty
maxv = max(diag, ver, hor, 0)
if maxv==0:
return [([""],[""])]
while i>=1 and j>=1 and maxv>0:
let2 = string2[i - 1]
let1 = string1[j - 1]
diag = score_matrix[i - 1][j - 1] + substitution_matrix[let2][let1]
ver = score_matrix[i - 1][j] + gap_penalty
hor = score_matrix[i][j - 1] + gap_penalty
maxv = max(diag, ver, hor, 0)
occnum=[diag,ver,hor].count(maxv)
if occnum>=1:
if maxv == diag:
al1.append(let1)
al2.append(let2)
i -= 1
j -= 1
elif maxv == ver:
i -= 1
al2.append(let2)
al1.append('-')
elif maxv == hor:
al2.append('-')
al1.append(let1)
j -= 1
let2 = string2[i - 1]
let1 = string1[j - 1]
al1.append(let1)
al2.append(let2)
return [(al1,al2)]
def traceback_from_global(i, j, string1,string2,substitution_matrix,gap_penalty,score_matrix,al1,al2):
while i>=1 and j>=1:
let2 = string2[i - 1]
let1 = string1[j - 1]
diag = score_matrix[i - 1][j - 1] + substitution_matrix[let2][let1]
ver = score_matrix[i - 1][j] + gap_penalty
hor = score_matrix[i][j - 1] + gap_penalty
maxv = max(diag, ver, hor,0)
occnum = [diag, ver, hor].count(maxv)
if maxv==0:
#al1.append(let1)
#al2.append(let2)
return [(al1, al2)]
if occnum == 1:
if maxv == diag:
al1.append(let1)
al2.append(let2)
i -= 1
j -= 1
elif maxv == ver:
i -= 1
al2.append(let2)
al1.append('-')
elif maxv == hor:
al2.append('-')
al1.append(let1)
j -= 1
elif occnum >1:
if hor==maxv and diag==maxv:
aligneddiag1 = copy.deepcopy(al1)
aligneddiag2 = copy.deepcopy(al2)
alignednotdiag1 = copy.deepcopy(al1)
alignednotdiag2 = copy.deepcopy(al2)
aligneddiag1.append(let1)
aligneddiag2.append(let2)
alignednotdiag1.append(let1)
alignednotdiag2.append('-')
alignments = []
for al in traceback(i-1, j-1, string1,string2,substitution_matrix,gap_penalty,score_matrix,aligneddiag1,aligneddiag2):
alignments.append(al)
for al in traceback(i, j - 1, string1, string2, substitution_matrix, gap_penalty, score_matrix,
alignednotdiag1, alignednotdiag2):
alignments.append(al)
return alignments
if hor<ver:
aligneddiag1 = copy.deepcopy(al1)
aligneddiag2 = copy.deepcopy(al2)
alignednotdiag1 = copy.deepcopy(al1)
alignednotdiag2 = copy.deepcopy(al2)
aligneddiag1.append(let1)
aligneddiag2.append(let2)
alignednotdiag1.append('-')
alignednotdiag2.append(let2)
alignments = []
for al in traceback(i - 1, j - 1, string1, string2, substitution_matrix, gap_penalty, score_matrix,
aligneddiag1, aligneddiag2):
alignments.append(al)
for al in traceback(i-1, j, string1, string2, substitution_matrix, gap_penalty, score_matrix,
alignednotdiag1, alignednotdiag2):
alignments.append(al)
return alignments
return [(al1,al2)]
def next_move(score_matrix, x, y,substitution_matrix,gap_penalty,string1,string2):
diag = score_matrix[x - 1][y - 1]+substitution_matrix[string2[x- 1]][string1[y - 1]]
up = score_matrix[x - 1][y]+gap_penalty
left = score_matrix[x][y - 1]+gap_penalty
if diag >= up and diag >= left: # Tie goes to the DIAG move.
return 1 if diag != 0 else 0 # 1 signals a DIAG move. 0 signals the end.
elif up > diag and up >= left: # Tie goes to UP move.
return 2 if up != 0 else 0 # UP move or end.
elif left > diag and left > up:
return 3 if left != 0 else 0 # LEFT move or end.
else:
# Execution should not reach here.
raise ValueError('invalid move during traceback')
def getdir(diag, ver, hor):
if diag<=0 and ver<=0 and hor<=0:
return 0
if diag >= ver and diag >= hor: # Tie goes to the DIAG move.
return 1 if diag != 0 else 0 # 1 signals a DIAG move. 0 signals the end.
elif ver > diag and ver >= hor: # Tie goes to UP move.
return 2 if ver != 0 else 0 # UP move or end.
elif hor > diag and hor > ver:
return 3 if hor != 0 else 0 # LEFT move or end.
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignment=("", "")
self.startendindices = ([0,0], [0,0])
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
        # scoring matrix skeleton; for Smith-Waterman the first row and column
        # stay at zero, since a local alignment may start at any position
        mat = []
        l1 = [0] * (len(self.string1) + 1)
        for i in range(1, len(self.string2) + 1):
            l = [0] + [None] * (len(self.string1))
            mat.append(l)
        mat.insert(0, l1)
# scoring matrix filling
for i in range(1, len(mat)):
for j in range(1, len(mat[0])):
diag = mat[i - 1][j - 1] + self.substitution_matrix[self.string2[i - 1]][self.string1[j - 1]]
ver = mat[i - 1][j] + self.gap_penalty
hor = mat[i][j - 1] + self.gap_penalty
maxv = max(diag, ver, hor,0)
mat[i][j] = maxv
self.score_matrix = mat
#string1 is used to index columns, string2 is used to index rows
#get last indices of strings based on max score
maxv=0
i=0
j=0
for x in range(len(mat)-1,0,-1):
for y in range(len(mat[0])-1,0,-1):
if mat[x][y]>maxv:
maxv=mat[x][y]
i=x
j=y
# string1 is used to index columns, string2 is used to index rows string2,string1
al1=[]
al2 = []
alignments = traceback_from_global(i, j, self.string1, self.string2, self.substitution_matrix, self.gap_penalty,
self.score_matrix, al1, al2)
#alignments = traceback(i,j,self.string1,self.string2,self.substitution_matrix,self.gap_penalty,self.score_matrix,al1,al2)
al=alignments[0]
al[0].reverse()
al[1].reverse()
allist1=list(al[0])
allist1="".join(allist1)
allist2= list(al[1])
allist2 = "".join(allist2)
al=(allist1,allist2)
self.alignment = al
al1 = self.alignment[0]
al2=self.alignment[1]
self.startendindices[0][0] = self.string1.find(al1.replace('-', ""))
self.startendindices[1][0] = self.string2.find(al2.replace('-', ""))
self.startendindices[0][1] = self.string1.find(al1.replace('-', "")) + len(al1.replace('-', "")) - 1
self.startendindices[1][1] = self.string2.find(al2.replace('-', "")) + len(al2.replace('-', "")) - 1
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return not(self.alignment[0] == "" and self.alignment[1] == "")
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignment
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned,
False otherwise
"""
stranges=[list(range(self.startendindices[0][0],self.startendindices[0][1]+1)),list(range(self.startendindices[1][0],self.startendindices[1][1]+1))]
return residue_index in stranges[string_number-1]
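# Minimal usage sketch (illustrative only): a toy +3 match / -1 mismatch
# substitution matrix stands in for a real scoring matrix such as BLOSUM62.
if __name__ == '__main__':
    toy_matrix = {a: {b: (3 if a == b else -1) for b in 'ARNDCEQGHILKMFPSTWYV'}
                  for a in 'ARNDCEQGHILKMFPSTWYV'}
    la = LocalAlignment('AVNCCEGQHI', 'ARNDEQ', -2, toy_matrix)
    if la.has_alignment():
        print(la.get_alignment())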
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.traceback_matrix = []
self.alignments = []
self.best_score = 0
# TODO always long enough? might need a (max(len1, len2))^2 matrix
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
num_rows, num_cols = self.score_matrix.shape
first_row = np.arange(start=0, stop=num_cols * self.gap_penalty, step=self.gap_penalty)
first_col = np.arange(start=0, stop=num_rows * self.gap_penalty, step=self.gap_penalty)
self.score_matrix[0, :] = first_row
self.score_matrix[:, 0] = first_col
traceback = []
for index_row in range(1, num_rows):
# row = self.score_matrix[index_row, :]
# print(row)
traceback_row = [[False, False, True]] # first col traceback is vertical
# traceback_row = []
for index_col in range(1, num_cols):
dia_score = self.score_matrix[index_row - 1, index_col - 1]
letter_1 = self.string1[index_col - 1]
letter_2 = self.string2[index_row - 1]
match_score = dia_score + self.substituion_matrix[letter_1][letter_2]
hor_gap_score = self.score_matrix[index_row, index_col - 1] + self.gap_penalty
vert_gap_score = self.score_matrix[index_row - 1, index_col] + self.gap_penalty
scores = [match_score, hor_gap_score, vert_gap_score]
max_score = max(scores)
traceback_entry = [score == max_score for score in scores]
# print(traceback_entry)
traceback_row.append(traceback_entry)
self.score_matrix[index_row, index_col] = max_score
# print(self.score_matrix[index_row, index_col])
traceback.append(traceback_row)
self.best_score = self.score_matrix[-1, -1]
# add first row all horizontal
traceback = [[[False, True, False]] * num_cols] + traceback
self.traceback_matrix = traceback
# now find the alignment(s)
alignments = []
index_row = num_rows - 1
index_col = num_cols - 1
res = ["", ""]
# res = [("", "")]
# alignments_indices = self.find_alignments_from_traceback(index_row, index_col, res)
initial_where_from = (False, False, False)
all_alignments_rev = self.find_traces(index_row, index_col, res, initial_where_from)
for alignment_rev in all_alignments_rev:
a = alignment_rev[0][::-1]
b = alignment_rev[1][::-1]
self.alignments.append((a, b))
def find_traces(self, index_row, index_col, res, where_from):
        # add the residue pair consumed by the move that led into this cell;
        # doing this before the top-left check ensures alignments whose first
        # column is a gap still get their final '-' appended
        if where_from[0]:  # dia
            res[0] += self.string1[index_col]
            res[1] += self.string2[index_row]
        elif where_from[1]:  # hor
            res[0] += self.string1[index_col]
            res[1] += '-'
        elif where_from[2]:  # vert
            res[0] += '-'
            res[1] += self.string2[index_row]
        # end when top left
        if index_row == 0 and index_col == 0:
            return [tuple(res)]
# go further in
rec_res = []
next_traces = self.traceback_matrix[index_row][index_col]
if next_traces[0]:
# print('go dia')
res1 = res.copy()
where = (True, False, False)
rec_res += self.find_traces(index_row - 1, index_col - 1, res1, where)
if next_traces[1]:
# print('go left')
res2 = res.copy()
where = (False, True, False)
rec_res += self.find_traces(index_row, index_col - 1, res2, where)
if next_traces[2]:
# print('go up')
res3 = res.copy()
where = (False, False, True)
rec_res += self.find_traces(index_row - 1, index_col, res3, where)
return rec_res
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.best_score
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
# return [
# ('ADMI-NS', 'ADMIRES'), ('ADMIN-S', 'ADMIRES')
# ]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
# return [
# [0, -1, -2, -3, -4, -5, -6],
# [-1, 1, 0, -1, -2, -3, -4],
# [-2, 0, 2, 1, 0, -1, -2],
# [-3, -1, 1, 3, 2, 1, 0],
# [-4, -2, 0, 2, 4, 3, 2],
# [-5, -3, -1, 1, 3, 4, 3],
# [-6, -4, -2, 0, 2, 3, 4],
# [-7, -5, -3, -1, 1, 2, 4]
        # ]
<file_sep>import sys
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
# Check if sequences is a valid list of MSA sequences
if len(sequences) == 0:
raise TypeError
test_len = len(sequences[0])
for seq in sequences:
if len(seq) != test_len:
raise TypeError
for aa in seq:
if aa not in ALPHABET:
raise TypeError
self.sequences = sequences
# Transform the list of sequences into an array of ints
        self.sequences_array = np.zeros(self.get_size(), dtype=int)
for seq_index, seq in enumerate(self.sequences):
for aa_index, aa in enumerate(seq):
self.sequences_array[seq_index][aa_index] = AA_TO_INT[aa]
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
ungapped_len = len(self.get_primary_sequence())
pssm = np.zeros((ungapped_len, 20))
bg_vector = self.bg_matrix_to_bg_vector(bg_matrix)
# 1. Calculate seq. weights
if use_sequence_weights:
sequence_weights = self.get_sequence_weights()
# 2. Count (optional using seq. weights weights) observed aas and gaps
primary_seq_with_index = self.get_primary_sequence(return_indices=True)
gap_count = np.zeros(ungapped_len)
for index, pair in enumerate(primary_seq_with_index):
for seq_index, seq in enumerate(self.sequences_array):
if seq[pair[1]] == GAP_INDEX:
                    if not redistribute_gaps:
continue
if use_sequence_weights:
gap_count[index] += sequence_weights[seq_index]
else:
gap_count[index] += 1
continue
if use_sequence_weights:
pssm[index][seq[pair[1]]] += sequence_weights[seq_index]
else:
pssm[index][seq[pair[1]]] += 1
# 3 .Redistribute gap if this option is chosen
if redistribute_gaps:
for i in range(np.shape(pssm)[0]):
for j in range(np.shape(pssm)[1]):
pssm[i][j] += gap_count[i] * bg_vector[j]
# 4. Add weighted pseudocounts
if add_pseudocounts:
if bg_matrix is None:
substitution_freq = np.full((20, 20), 0.0025)
else:
substitution_freq = bg_matrix
pseudocounts = np.zeros(np.shape(pssm))
for i in range(np.shape(pssm)[0]):
for j in range(np.shape(pssm)[1]):
for k in range(np.shape(pssm)[1]):
pseudocounts[i][j] += (pssm[i][k] / bg_vector[k]
) * substitution_freq[j][k]
N = self.get_number_of_observations()
for i, row in enumerate(pssm):
for j, __ in enumerate(row):
pssm[i][j] = ((N-1) * pssm[i][j] + beta *
pseudocounts[i][j]) / ((N-1) + beta)
# 5. Normalize to relative frequency
row_sum = np.sum(pssm, axis=1)
for i in range(np.shape(pssm)[0]):
pssm[i] = np.divide(pssm[i], row_sum[i])
# 6. Divide by background frequency
for i in range(np.shape(pssm)[0]):
for j in range(np.shape(pssm)[1]):
pssm[i][j] = pssm[i][j] / bg_vector[j]
# 7. Calculate log score (with -20 instead of inf)
for i in range(np.shape(pssm)[0]):
for j in range(np.shape(pssm)[1]):
if pssm[i][j] == 0:
pssm[i][j] = 2**-10
pssm = 2*np.log2(pssm)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
# since __init__ ensures that all sequences have the same length it is
# sufficent to return the length of the first sequence
return (len(self.sequences), len(self.sequences[0]))
def bg_matrix_to_bg_vector(self, bg_matrix=None):
if bg_matrix is None:
return np.full(20, 0.05)
else:
return np.sum(bg_matrix, axis=0)
def get_primary_sequence(self, return_indices=False):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
        if not return_indices:
return ''.join([aa for aa in self.sequences[0] if aa != '-'])
return [(aa, aa_index) for aa_index, aa in enumerate(self.sequences[0]) if aa != '-']
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros((len(self.sequences[0]), len(self.sequences) + 1))
for col_index, col in enumerate(self.sequences_array.T):
# Get the unique aa in col and their counts.
unique, counts = np.unique(col, return_counts=True)
# r is just the number of unique aa in a sequence
weights[col_index][len(self.sequences)] = len(unique)
            # s is the count for a specific aa
for aa_index, aa in enumerate(col):
unique_index = np.where(unique == aa)[0]
s = counts[unique_index]
weights[col_index][aa_index] = s
sequence_weights = np.zeros(len(self.sequences))
for seq_index, seq in enumerate(weights.T):
weight = 0.0
for aa_index, aa in enumerate(seq):
if weights[aa_index][len(self.sequences)] == 1:
continue
weight += 1/(float(weights[aa_index][seq_index])
* float(weights[aa_index][len(self.sequences)]))
if seq_index == len(self.sequences):
break
sequence_weights[seq_index] = weight
return sequence_weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
r = 0
for col_index, col in enumerate(self.sequences_array.T):
r += len(np.unique(col))
return (1/len(self.sequences[0])) * float(r)
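# Minimal usage sketch (illustrative only; the sequences are made up):
if __name__ == '__main__':
    msa = MSA(['SEQNCE', 'SEQNCE', 'SEQECE'])
    print(msa.get_sequence_weights())
    print(msa.get_number_of_observations())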
<file_sep>##############
# Exercise 2.6
##############
import os
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
fastaFileData = ""
counter = 0
aminoAcidSequence = ""
absAAFrequencies = { 'A' : 0, 'R' : 0, 'N' : 0, 'D' : 0, 'C' : 0, 'E' : 0, 'Q' : 0, 'G' : 0, 'H' : 0, 'I' : 0, 'L' : 0, 'K' : 0, 'M' : 0, 'F' : 0, 'P' : 0, 'S' : 0, 'T' : 0, 'W' : 0, 'Y' : 0, 'V' : 0}
avgAAFrequencies = { 'A' : 0, 'R' : 0, 'N' : 0, 'D' : 0, 'C' : 0, 'E' : 0, 'Q' : 0, 'G' : 0, 'H' : 0, 'I' : 0, 'L' : 0, 'K' : 0, 'M' : 0, 'F' : 0, 'P' : 0, 'S' : 0, 'T' : 0, 'W' : 0, 'Y' : 0, 'V' : 0}
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
self.counter = 0
        for textLine in self.fastaFileData.splitlines():
if textLine.startswith(">"):
self.counter += 1
#print(self.counter)
#print("".join(self.fastaFileData))
return self.counter
def get_average_length(self):
self.aminoAcidSequence = ""
for textLine in self.fastaFileData.splitlines():
if not textLine.startswith(">") and not textLine.startswith("\n"):
self.aminoAcidSequence += textLine
self.aminoAcidSequence = self.aminoAcidSequence.replace("*", "")
average = (len(self.aminoAcidSequence) / self.counter)
#print(average)
#print(self.aminoAcidSequence)
#print(len(self.aminoAcidSequence))
return(average)
def read_fasta(self, path):
self.fastaFileData = ""
        with open(path) as fastaFile:
            self.fastaFileData = fastaFile.read()
def get_abs_frequencies(self):
self.absAAFrequencies = { 'A' : 0, 'R' : 0, 'N' : 0, 'D' : 0, 'C' : 0, 'E' : 0, 'Q' : 0, 'G' : 0, 'H' : 0, 'I' : 0, 'L' : 0, 'K' : 0, 'M' : 0, 'F' : 0, 'P' : 0, 'S' : 0, 'T' : 0, 'W' : 0, 'Y' : 0, 'V' : 0}
for aminoAcid in self.aminoAcidSequence:
self.absAAFrequencies[aminoAcid] += 1
return self.absAAFrequencies
def get_av_frequencies(self):
self.avgAAFrequencies = { 'A' : 0, 'R' : 0, 'N' : 0, 'D' : 0, 'C' : 0, 'E' : 0, 'Q' : 0, 'G' : 0, 'H' : 0, 'I' : 0, 'L' : 0, 'K' : 0, 'M' : 0, 'F' : 0, 'P' : 0, 'S' : 0, 'T' : 0, 'W' : 0, 'Y' : 0, 'V' : 0}
for aminoAcid in self.avgAAFrequencies:
self.avgAAFrequencies[aminoAcid] = self.absAAFrequencies[aminoAcid] / len(self.aminoAcidSequence)
return self.avgAAFrequencies
"""
relative_path = os.path.dirname(__file__)
dist = AADist(os.path.join(relative_path,"tests.fasta"))
dist.get_counts()
dist.get_average_length()
dist.get_abs_frequencies()
dist.get_av_frequencies()
"""
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.first_string_length = len(self.string1)
self.second_string_length = len(self.string2)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(self.first_string_length+1):
self.score_matrix[0][i] = self.gap_penalty * i
for i in range(self.second_string_length + 1):
self.score_matrix[i][0] = self.gap_penalty * i
for row in range(self.second_string_length+1):
for column in range(self.first_string_length+1):
if row != 0 and column != 0:
self.score_matrix[row][column] = max(self.score_matrix[row - 1][column - 1] + self.substituion_matrix[self.string2[row-1]][self.string1[column-1]],
self.score_matrix[row - 1][column] + self.gap_penalty, self.score_matrix[row][column - 1] + self.gap_penalty)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[self.second_string_length][self.first_string_length]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
alignments = self.get_alignments()
if alignments:
return len(alignments)
return 0
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
if 1 in [self.first_string_length, self.second_string_length]:
if self.string1[0] != self.string2[0]:
return None
        stack = []
        stack.append(("", "", self.second_string_length, self.first_string_length))
        result = []
        while len(stack) > 0:
            first, second, i, j = stack.pop()
            if i == 0 and j == 0:
                result.append((first, second))
                continue
            condition = self.score_matrix[i][j] == self.score_matrix[i - 1][j - 1] + self.substituion_matrix[self.string2[i - 1]][self.string1[j - 1]]
            if i > 0 and j > 0 and condition:
                stack.append((self.string2[i-1] + first, self.string1[j-1] + second, i-1, j-1))
            if j > 0 and self.score_matrix[i][j] == self.score_matrix[i][j - 1] + self.gap_penalty:
                stack.append(("-" + first, self.string1[j-1] + second, i, j-1))
            if i > 0 and self.score_matrix[i][j] == self.score_matrix[i-1][j] + self.gap_penalty:
                stack.append((self.string2[i-1] + first, "-" + second, i - 1, j))
return result
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
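# Minimal usage sketch (illustrative only): a toy +1 match / -1 mismatch
# substitution matrix over a small alphabet.
if __name__ == '__main__':
    toy = {a: {b: (1 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    ga = GlobalAlignment('ACGT', 'AGT', -1, toy)
    print(ga.get_best_score())
    print(ga.get_alignments())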
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
self.alignmentList = []
self.alignment_finder(len(string2), len(string1), "", "")
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
row_length = len(self.string2) + 1
col_length = len(self.string1) + 1
# row
for i in range(row_length):
self.score_matrix[i, 0] = i * self.gap_penalty
# col
for i in range(col_length):
self.score_matrix[0, i] = i * self.gap_penalty
for i in range(1, row_length):
for j in range(1, col_length):
item_score = self.substituion_matrix[self.string1[j-1]][self.string2[i-1]]
match = self.score_matrix[i-1, j-1] + item_score
delete = self.score_matrix[i-1, j] + self.gap_penalty
insert = self.score_matrix[i, j-1] + self.gap_penalty
self.score_matrix[i ,j] = max(match, delete, insert)
def alignment_finder(self, i, j, alignment1, alignment2):
# found
if i == 0 and j == 0:
self.alignmentList.append((alignment1, alignment2))
# top
elif i == 0:
self.alignment_finder(i, j - 1, self.string1[j-1] + alignment1, "-" + alignment2)
# leftmost
elif j == 0:
self.alignment_finder(i-1, j, "-" + alignment1, self.string2[i-1] + alignment2)
# middle
else:
# up
if self.score_matrix[i-1,j] + self.gap_penalty == self.score_matrix[i,j]:
self.alignment_finder(i-1, j, "-" + alignment1, self.string2[i-1] + alignment2)
# left
if self.score_matrix[i, j-1] + self.gap_penalty == self.score_matrix[i,j]:
self.alignment_finder(i, j-1, self.string1[j-1] + alignment1, "-" + alignment2)
# diag
if self.score_matrix[i-1, j-1] + self.substituion_matrix[self.string2[i-1]][self.string1[j-1]] == self.score_matrix[i,j]:
self.alignment_finder(i-1, j-1, self.string1[j-1] + alignment1, self.string2[i-1] + alignment2)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
row_length = len(self.string2) + 1
col_length = len(self.string1) + 1
return self.score_matrix[row_length - 1, col_length - 1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignmentList)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignmentList
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
<file_sep>import numpy as np
from util import subtract
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.rows = len(string2) + 1
self.cols = len(string1) + 1
self.gap_penalty = gap_penalty
self.subs = matrix
        self.scores = np.zeros((self.rows, self.cols), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i, j in np.ndindex(self.scores.shape):
if i == 0 or j == 0:
self.scores[i, j] = (i + j) * self.gap_penalty
else:
letter1 = self.string1[j-1]
letter2 = self.string2[i-1]
score_cands = [
self.scores[i-1, j-1] + self.subs[letter2][letter1],
self.scores[i-1, j] + self.gap_penalty,
self.scores[i, j-1] + self.gap_penalty
]
self.scores[i, j] = max(score_cands)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.scores[-1, -1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
# keep candidates in stack
stack = [("", "", (self.rows - 1, self.cols - 1))]
alignments = []
# loop candidates until stack is empty
while stack:
aligned1, aligned2, point = stack.pop()
if point == (0, 0):
# stop condition
alignments.append((aligned1, aligned2))
continue
letter1 = self.string1[point[1] - 1]
letter2 = self.string2[point[0] - 1]
# add candidates according to scores matrix
if self.scores[point] - self.scores[subtract(point, (1, 1))] == self.subs[letter1][letter2]:
stack.append((letter1 + aligned1, letter2 + aligned2, subtract(point, (1, 1))))
if self.scores[point] - self.scores[subtract(point, (1, 0))] == self.gap_penalty:
stack.append(("-" + aligned1, letter2 + aligned2, subtract(point, (1, 0))))
if self.scores[point] - self.scores[subtract(point, (0, 1))] == self.gap_penalty:
stack.append((letter1 + aligned1, "-" + aligned2, subtract(point, (0, 1))))
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.scores.tolist()
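# Note: `subtract` is imported from a local util module that is not included
# in this file. Judging by its use above (the result is used as a matrix
# index), it presumably performs elementwise tuple subtraction, roughly:
#
#     def subtract(a, b):
#         return tuple(x - y for x, y in zip(a, b))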
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.traceback_matrix = []
for i in range(0, len(string2) + 2):
row = []
for j in range(0, len(string1) + 2):
row.append([])
self.traceback_matrix.append(row)
self.calculate_scores()
self.alignments = self.align()
def calculate_scores(self):
rows = len(self.string2) + 1
columns = len(self.string1) + 1
acc = self.gap_penalty
for i in range(1, columns):
self.score_matrix[0][i] = acc
acc = acc + self.gap_penalty
acc = self.gap_penalty
for i in range(1, rows):
self.score_matrix[i][0] = acc
acc = acc + self.gap_penalty
do_row = True
do_column = True
width = 0
height = 0
while True:
if width < columns - 1:
do_column = True
width += 1
if height < rows - 1:
do_row = True
height += 1
# Row
if do_row:
do_row = False
for i in range(1, width):
self.calc_cell_score(height, i)
# Column
if do_column:
do_column = False
for i in range(1, height):
self.calc_cell_score(i, width)
# Corner
self.calc_cell_score(height, width)
if width == columns - 1 and height == rows - 1:
break
def calc_cell_score(self, y, x):
left_score = self.score_matrix[y][x-1] + self.gap_penalty
top_score = self.score_matrix[y-1][x] + self.gap_penalty
match_score = self.substitution_matrix[self.string1[x-1]][self.string2[y-1]]
diag_score = self.score_matrix[y-1][x-1] + match_score
max_score = max(left_score, top_score, diag_score)
if left_score == max_score:
self.traceback_matrix[y][x].append('L')
if top_score == max_score:
self.traceback_matrix[y][x].append('T')
if diag_score == max_score:
self.traceback_matrix[y][x].append('D')
self.score_matrix[y][x] = max_score
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
return self.get_alignment(len(self.string2), len(self.string1))
def get_alignment(self, y, x):
ret = []
if x == 0 and y == 0:
return [('', '')]
prefixes_d = []
prefixes_l = []
prefixes_t = []
for d in self.traceback_matrix[y][x]:
if d == 'D':
for prefix in self.get_alignment(y-1, x-1):
prefixes_d.append(prefix)
if d == 'L':
for prefix in self.get_alignment(y, x-1):
prefixes_l.append(prefix)
if d == 'T':
for prefix in self.get_alignment(y-1, x):
prefixes_t.append(prefix)
for prefix in prefixes_d:
str1, str2 = prefix
ret.append((str1+self.string1[x-1], str2+self.string2[y-1]))
for prefix in prefixes_l:
str1, str2 = prefix
ret.append((str1+self.string1[x-1], str2+'-'))
for prefix in prefixes_t:
str1, str2 = prefix
ret.append((str1+'-', str2+self.string2[y-1]))
return ret
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
<file_sep>#!/bin/bash
# run in moss/test
dirs=($(find . -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
#create results directory
if [ ! -d '../results' ]; then
mkdir '../results'
fi
#perform moss search and save results in moss_results.txt file
truncate -s 0 moss_results.txt
for d in ${dirs[@]}; do
cd ${d}
codefiledirs=($(find collected_files -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
cd ..
echo ${d} >> moss_results.txt
echo '#########' >> moss_results.txt
for f in ${codefiledirs[@]}; do
echo ${f} >> moss_results.txt
cd ..
templatefile="templates/${d}/${f}.py"
if [ -f $templatefile ]; then
echo "$templatefile serves as basefile"
./moss -l python -b ${templatefile} test/${d}/collected_files/${f}/*.py >> test/moss_results.txt
else
./moss -l python test/${d}/collected_files/${f}/*.py >> test/moss_results.txt
fi
cd test
echo ' ' >> moss_results.txt
done
echo ' ' >> moss_results.txt
done
#Aggregate links in moss_links.txt
truncate -s 0 moss_links.txt
grep "http://" moss_results.txt >> moss_links.txt
#Generate the Mossum-Graphs in '../results' directory
links_list=""
while IFS= read -r line # concatenate all links as string
do
echo $line
links_list="$links_list $line"
echo $links_list
done < "moss_links.txt"
cd ../mossum/mossum
python mossum.py -p 30 -t ".*/(.+).py" -m -o ../../results/result $links_list
cd ../../test
#Generate mossum graph for each excercise
IFS=' ' read -r -a array <<< "$links_list"
for d in ${dirs[@]}; do
cd ${d}
codefiledirs=($(find collected_files -maxdepth 1 -mindepth 1 -type d -printf '%f\n'))
cd ..
i=0
for f in ${codefiledirs[@]}; do
cd ../mossum/mossum
echo ${array[$i]}
python mossum.py -p 30 -t ".*/(.+).py" -o ../../results/$f ${array[$i]}
i=$((i+1))
cd ../../test
done
done
#git directly
<file_sep>import numpy as np
import math
from pathlib import Path
import re
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
def round_nearest_int(floate) :
flor = math.floor(floate)
if floate - flor > 0.5 :
return flor + 1
else :
return flor
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = list()
self.words = set()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
nie = list()
for seq in self.sequences :
if word in seq :
nie.append(seq)
return nie
def get_number_of_words(self) :
words = set()
sequences = set(self.sequences)
count = 0
for seq in sequences :
            l = set(seq[i:i+3] for i in range(len(seq) - 2))  # overlapping 3-mers, matching get_db_stats
for word in l :
if word not in words and len(word) == 3:
words.add(word)
count += 1
self.words = words
return count
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_seq = len(self.sequences) #Number of sequences in database
words=set()
dwords = 0
for seq in self.sequences:
wordset = set()
for i in range(0, len(seq) - 2):
word = seq[i:i + 3]
if word not in wordset:
wordset.add(word)
dwords += len(wordset)
words.update(wordset)
nwords = len(words) #Number of different words in database
avg_num_words=round_nearest_int(dwords/num_seq)
nini = 0 # counts number of sequences in which each word appears, in total
for word in words:
seeq = 0
for seq in self.get_sequences(word):
if word in seq:
seeq+=1
nini += seeq
avg_num_seq=round_nearest_int(nini/nwords)
return tuple((num_seq, nwords, avg_num_words, avg_num_seq))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.matrix = substitution_matrix
self.gg = dict()
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words = set()
gg = self.gg
        if sequence is not None:
n = len(sequence)
matrix = self.matrix
        elif pssm is not None:
matrix = pssm
n = matrix.shape[0]
for i in range (n-2) :
for f in ALPHABET :
for s in ALPHABET :
for t in ALPHABET :
word = f + s + t
if sequence != None :
indexi = AA_TO_INT[sequence[i]]
indexi1 = AA_TO_INT[sequence[i+1]]
indexi2 = AA_TO_INT[sequence[i+2]]
else :
indexi = i
indexi1 = i + 1
indexi2 = i + 2
score = matrix[indexi][AA_TO_INT[f]] + matrix[indexi1][AA_TO_INT[s]] + matrix[indexi2][AA_TO_INT[t]]
if word not in words and score >= T :
words.add(word)
if word not in gg :
gg.update({word : (indexi, score)})
self.gg = gg
return list(words)
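    # Worked example (assuming BLOSUM62 as the substitution matrix): for the
    # query word 'MGP' the self-match scores 5 + 6 + 7 = 18, comfortably above
    # the default T=11, so 'MGP' itself is kept, alongside any neighbouring
    # triplet whose summed substitution score still clears the threshold.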
def extend_to_the_right(self, target, query, pssm, word, X, T, gg) :
        index = target.index(word) + 2 #Index of the target that is being considered to compute the score
beginning = index - 2
final = index
qindex = gg[word][0] + 2 #Index of the query
begque = qindex - 2
finalque = qindex
if query == None :
matrix = pssm
lquery = pssm.shape[0]
score = matrix[qindex - 2][AA_TO_INT[word[0]]] + matrix[qindex - 1][AA_TO_INT[word[1]]] + matrix[qindex][AA_TO_INT[word[2]]]
else :
matrix = self.matrix #Matrix that is being used to get the score
lquery = len(query)
score = matrix[AA_TO_INT[word[0]]][AA_TO_INT[target[index - 2]]] + matrix[AA_TO_INT[word[1]]][AA_TO_INT[target[index - 1]]] + matrix[AA_TO_INT[word[2]]][AA_TO_INT[target[index]]]
#Compute the score with the considered word
maxscore = score
while score >= T and maxscore - score <= X and index < len(target)-1 and qindex < lquery-1 :
index += 1
qindex += 1
fscore = score
if query == None : #PSSM CASE
score += matrix[qindex][AA_TO_INT[target[index]]]
else :
score += matrix[AA_TO_INT[query[qindex]]][AA_TO_INT[target[index]]]
if score > fscore :
maxscore = score
final = index
finalque = qindex
return (beginning, final, begque, finalque, maxscore) #Beginning in target, Final index in target, Beg in query, Final in query, Computed Score
def extend_to_the_left(self, target, query, pssm, X, T, beginning, begque, score) :
if query == None : #PSSM CASE
matrix = pssm
else :
matrix = self.matrix
index = beginning
qindex = begque
maxscore = score
while score >= T and maxscore - score < X and index > 0 and qindex > 0 :
index -= 1
qindex -= 1
fscore = score
if query == None : #PSSM
score += matrix[qindex][AA_TO_INT[target[index]]]
else :
score += matrix[AA_TO_INT[query[qindex]]][AA_TO_INT[target[index]]]
if score > fscore :
maxscore = score
beginning = index
begque = qindex
return (beginning, begque, maxscore)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
# if query != None :
# words = self.get_words(sequence=query, pssm=None, T=T)
# gg = self.gg
#
# elif pssm.all() != None :
# words = self.get_words(sequence=None, pssm=pssm, T=T)
# gg = self.gg
#
# for word in words :
# for target in set(blast_db.get_sequences(word)) :
# nn = re.finditer(word, target)
#
# for i in nn :
# s = i.start()
# beginning, final, begque, finalque, score = self.extend_to_the_right(target[s:], query, pssm, word, X, T, gg)
# beg, begqu, maxscore = self.extend_to_the_left(target, query, pssm, X, T, s, begque, score)
#
# if maxscore >= S :
# if target in d :
# if (begqu, beg, finalque - begqu, maxscore) not in d[target] :
# d[target].append((begqu, beg, finalque - begqu, maxscore))
# else :
# d.update({target : [(begqu, beg, finalque - begqu, maxscore)]})
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder result; the two-hit search is not implemented
        return d
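# Minimal usage sketch (illustrative only; the sequences are made up):
if __name__ == '__main__':
    db = BlastDb()
    db.add_sequence('MGPRARPAFL')
    db.add_sequence('MGARSRPAFL')
    print(db.get_db_stats())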
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return aa in {"D", "E", "R", "H", "K"}

def isPositivelyCharged(aa):
    return aa in {"R", "H", "K"}

def isNegativelyCharged(aa):
    return aa in {"D", "E"}

def isHydrophobic(aa):
    return aa in {"A", "I", "L", "M", "V", "F", "W", "Y"}

def isAromatic(aa):
    return aa in {"F", "W", "Y", "H"}

def isPolar(aa):
    return aa in {"R", "D", "E", "H", "K", "Y", "N", "Q", "S", "T"}

def isProline(aa):
    return aa == "P"

def containsSulfur(aa):
    return aa in {"C", "M"}

def isAcid(aa):
    return aa in {"D", "E"}

def isBasic(aa):
    return aa in {"R", "H", "K"}
<file_sep>##############
# Exercise 2.6
##############
from itertools import groupby
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
aa_seq = []
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def returnList(self):
with open('noHeader.txt') as f:
aa_seq = f.read().splitlines()
return aa_seq
def get_counts(self):
aa_seq = self.returnList()
return len(aa_seq)
def get_average_length(self):
aa_seq = self.returnList()
total_sum = 0.0
for element in aa_seq:
total_sum = total_sum + len(element)
return total_sum / len(aa_seq)
    def read_fasta(self, path):
        # write the bare sequences to an intermediate file; returnList()
        # re-reads them on demand
        fiter = self.fasta_iter(path)
        with open('noHeader.txt', 'w') as aFile:
            for ff in fiter:
                headerStr, seq = ff
                aFile.write(seq + "\n")
def fasta_iter(self, fasta_name):
fh = open(fasta_name)
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
for header in faiter:
headerStr = header.__next__()[1:].strip()
seq = "".join(s.strip() for s in faiter.__next__())
if seq.__contains__('*'):
seq = seq[0:len(seq)-1]
yield (headerStr, seq)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
counter = {}
aa_seq = self.returnList()
for element in aa_seq:
for aa in element:
if aa in counter.keys():
counter[aa] = counter[aa] + 1
else:
counter[aa] = 1
return counter
    def get_av_frequencies(self):
        # return number of occurrences normalized by length
        freqs = self.get_abs_frequencies()
        total_sum = 0
        for key in freqs:
            total_sum = total_sum + freqs[key]
        for key in freqs:
            freqs[key] = freqs[key] / total_sum
        return freqs
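
# Minimal usage sketch (illustrative, not part of the exercise template);
# "example.fasta" is a placeholder path and must exist for this to run.
if __name__ == "__main__":
    dist = AADist("example.fasta")
    print(dist.get_counts())           # number of sequences
    print(dist.get_average_length())   # mean sequence length
    print(dist.get_av_frequencies())   # relative amino acid frequencies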
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino aci
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.valueMax = 0
self.maxIndices = []
self.startIndices = []
self.result = list()
self.align()
    def align(self):
        """
        Align given strings using the Smith-Waterman algorithm.
        NB: score matrix and the substitution matrix are different matrices!
        """
n = len(self.string1)#columns
m = len(self.string2)#rows
for i in range(0, m + 1):
self.score_matrix[i][0] = 0
for j in range(0, n + 1):
self.score_matrix[0][j] = 0
for i in range(1, m + 1):
for j in range(1, n + 1):
x = self.substitution_matrix[self.string1[j-1]][self.string2[i-1]]
match = self.score_matrix[i - 1][j - 1] + x
delete = self.score_matrix[i - 1][j] + self.gap_penalty
insert = self.score_matrix[i][j - 1] + self.gap_penalty
self.score_matrix[i][j] = max(0,match, delete, insert)
self.valueMax = np.amax(self.score_matrix)
self.maxIndices =np.argwhere(self.score_matrix == self.valueMax)
self.findAligments(self.maxIndices[0][0], self.maxIndices[0][1],"","")
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
def findAligments(self, i, j, a1, a2):
if self.score_matrix[i,j] == 0:
self.result.append((a1, a2))
self.startIndices.append((i,j))
return
elif i == 0 and j > 0:
self.findAligments(i, j - 1, self.string1[j-1] + a1, "-" + a2)
elif j == 0 and i > 0:
self.findAligments(i-1, j, "-" + a1, self.string2[i-1] + a2)
else:
if self.score_matrix[i, j-1] + self.gap_penalty == self.score_matrix[i,j]:
self.findAligments(i, j-1, self.string1[j-1] + a1, "-" + a2)
if self.score_matrix[i-1,j] + self.gap_penalty == self.score_matrix[i,j]:
self.findAligments(i-1, j, "-" + a1, self.string2[i-1] + a2)
if self.score_matrix[i-1, j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]] == self.score_matrix[i,j]:
self.findAligments(i-1, j-1, self.string1[j-1] + a1, self.string2[i-1] + a2)
    def has_alignment(self):
        """
        :return: True if a local alignment has been found, False otherwise
        """
        return self.valueMax > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings ('DAC', 'DAC')
"""
return self.result[0]
    def is_residue_aligned(self, string_number, residue_index):
        """
        :param string_number: number of the string (1 for string1, 2 for string2) to check
        :param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned,
                 False otherwise
        """
        if string_number == 1:
            return self.startIndices[0][1] <= residue_index <= self.maxIndices[0][1]
        if string_number == 2:
            return (self.startIndices[0][0] <= residue_index + 1
                    <= min(self.maxIndices[0][0], len(self.string2) - 1))
        return False
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return True
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return ('DAC', 'DAC')
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
return False
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.number_of_sequences = -1
self.fasta_string = None
self.read_fasta(filepath)
def get_counts(self):
return self.number_of_sequences
def get_average_length(self):
return len(self.fasta_string) / self.get_counts()
def read_fasta(self, path):
self.fasta_string = ""
self.number_of_sequences = 0
with open(path) as f:
current_line = f.readline()
while current_line != "":
if current_line.startswith(">"):
self.number_of_sequences += 1
elif current_line.startswith(";"):
pass
else:
self.fasta_string += current_line
current_line = f.readline()
self.fasta_string = self.fasta_string.replace("\n", "").replace("*", "")
def get_abs_frequencies(self):
return Counter(self.fasta_string)
def get_av_frequencies(self):
counter = self.get_abs_frequencies()
for key in counter:
counter[key] /= len(self.fasta_string)
return counter
<file_sep>import numpy as np
from collections import Counter
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class KMP:
def partial(self, pattern):
""" Calculate partial match table: String -> [Int]"""
ret = [0]
for i in range(1, len(pattern)):
j = ret[i - 1]
while j > 0 and pattern[j] != pattern[i]:
j = ret[j - 1]
ret.append(j + 1 if pattern[j] == pattern[i] else j)
return ret
def search(self, T, P):
"""
KMP search main algorithm: String -> String -> [Int]
        Return all matching positions of pattern string P in text T
"""
partial, ret, j = self.partial(P), [], 0
for i in range(len(T)):
while j > 0 and T[i] != P[j]:
j = partial[j - 1]
if T[i] == P[j]: j += 1
if j == len(P):
ret.append(i - (j - 1))
j = partial[j - 1]
return ret
class BlastDb:
    def __init__(self, db=None):
        """
        Initialize the BlastDb class.
        """
        # a mutable default argument ([]) would be shared across instances
        self.db = db if db is not None else []
self.words = []
self.num_words = 0
self.words_set = set()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
results = []
for sequence in self.db:
if word in sequence:
results.append(sequence)
return results
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
# COUNT WORDS 1ST METHOD
# for sequence in self.db:
# for idx in range(0, len(sequence), 3):
# word = sequence[idx:idx+3]
# if len(word) == 3 and word not in self.words:
# self.num_words += 1
# self.words.append(word)
# COUNT WORDS 2ND METHOD
# total = 0
# for sequence in self.db:
# words_sequence = {sequence[i:i+3] for i in range(len(sequence)-2)}
# total += len(words_sequence)
# self.words_set = self.words_set.union(words_sequence)
# COUNT WORDS 3RD METHOD
total = 0
final_counter = Counter()
for sequence in self.db:
words_sequence = {sequence[i:i+3] for i in range(len(sequence)-2)}
final_counter += Counter(words_sequence)
total += len(words_sequence) # FIXME: MAYBE NOT FINAL
self.words_set = self.words_set.union(words_sequence)
total_final_counter = sum(final_counter.values())
num_sequences = len(self.db)
num_words = len(self.words_set)
avg_words = round(total/num_sequences)
avg_sequences = round(total_final_counter/num_words)
result = (num_sequences, num_words, avg_words, avg_sequences)
return result
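
    # Worked example (illustrative): for db = ["ACDEF", "CDEFG"] the 3-mer
    # sets are {ACD, CDE, DEF} and {CDE, DEF, EFG}: 2 sequences, 4 distinct
    # words, round(6/2) = 3 words per sequence on average, and
    # round(6/4) = 2 sequences per word (each word counted once per
    # sequence containing it).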
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
self.score = 0
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
my_set = set()
if sequence is not None:
list_words = [sequence[i:i+3] for i in range(len(sequence)-2)]
for word in list_words:
for aa1 in ALPHABET:
for aa2 in ALPHABET:
for aa3 in ALPHABET:
score_1 = self.substitution_matrix[AA_TO_INT[word[0]]][AA_TO_INT[aa1]]
score_2 = self.substitution_matrix[AA_TO_INT[word[1]]][AA_TO_INT[aa2]]
score_3 = self.substitution_matrix[AA_TO_INT[word[2]]][AA_TO_INT[aa3]]
if score_1 + score_2 + score_3 >= T:
my_set.add(aa1+aa2+aa3)
if pssm is not None:
for idx in range(len(pssm)-2):
for aa1 in ALPHABET:
for aa2 in ALPHABET:
for aa3 in ALPHABET:
score_1 = pssm[idx][AA_TO_INT[aa1]]
score_2 = pssm[idx+1][AA_TO_INT[aa2]]
score_3 = pssm[idx+2][AA_TO_INT[aa3]]
if score_1 + score_2 + score_3 >= T:
my_set.add(aa1+aa2+aa3)
return list(my_set)
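
    # Illustration (hypothetical threshold): for the query word "MGP" and
    # T = 13, a candidate word stays in the output if the summed pairwise
    # substitution scores reach T. With BLOSUM62-like values the identical
    # word scores 5 + 6 + 7 = 18 >= 13, so every query word is returned
    # together with its sufficiently similar neighbours.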
def compute_score_query(self,score,target,query,right=False,left=False):
if score is None: # HACKERY
# First word case
self.score = 0
score_1 = self.substitution_matrix[AA_TO_INT[target[0]]][AA_TO_INT[target[0]]]
score_2 = self.substitution_matrix[AA_TO_INT[target[1]]][AA_TO_INT[target[1]]]
score_3 = self.substitution_matrix[AA_TO_INT[target[2]]][AA_TO_INT[target[2]]]
self.score = score_1 + score_2 + score_3
return self.score
elif left:
self.score += self.substitution_matrix[AA_TO_INT[target[-1]]][AA_TO_INT[query[-1]]]
return self.score
elif right:
self.score += self.substitution_matrix[AA_TO_INT[target[0]]][AA_TO_INT[query[0]]]
return self.score
# def compute_score_pssm(self,score,target,query,right=False,left=False):
# if score is None:
# # First word case
# self.score = 0
# score_1 = pssm[idx][AA_TO_INT[aa1]]
# score_2 = pssm[idx+1][AA_TO_INT[aa2]]
# score_3 = pssm[idx+2][AA_TO_INT[aa3]]
# self.score = score_1 + score_2 + score_3
# return self.score
# elif left:
# self.score += self.substitution_matrix[AA_TO_INT[target[-1]]][AA_TO_INT[query[-1]]]
# return self.score
# elif right:
# self.score += self.substitution_matrix[AA_TO_INT[target[0]]][AA_TO_INT[query[0]]]
# return self.score
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query is not None:
query = "MVATGLFVGLNKGHVVTKREQPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKDKRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK"
# Generate all words of length w that when aligned with the query have a score greater or equal to T
all_words = self.get_words(sequence=query,T=T)
# Search database for target sequences containing at least one of those words
for word in all_words:
matched_sequences = blast_db.get_sequences(word)
#print(matched_sequences)
for target in matched_sequences:
highest_score = S
idx_target = target.find(word) # FIXME: What if more than one occurence??
idx_query = query.find(word) # FIXME: What if more than one occurence??
score = None
saved_word = None
saved_idx_target_end = None
saved_idx_query_end = None
idx_target_end = idx_target + 3
idx_query_end = idx_query + 3
# Extend words to find local high-scoring segment pairs (HSP) with a score greater or equal to S
## Extend (without gaps) in one direction first (right)...
while idx_target_end < len(target)+1 and idx_query_end < len(query)+1:
new_word = target[idx_target:idx_target_end]
score = self.compute_score_query(score,new_word,query[idx_query:idx_query_end],left=True) # Next 1 (to the right) (or total)
## Stop extending into a direction once score drops X below highest score encountered
if score <= highest_score - X:
break
elif score > highest_score:
highest_score = score
saved_word = new_word
saved_idx_target_end = idx_target_end
saved_idx_query_end = idx_query_end
## Keep shortest highest-scoring HSP (always include original word)
if score >= highest_score:
ignore = False
if target not in d:
d[target] = [(idx_query,idx_target,len(new_word),score)]
else:
for el in d[target]:
if el[3] == score:
diff = len(new_word) - el[2]
if el[0] - idx_query == diff and el[1] - idx_target == diff:
ignore = True
break
if not ignore:
d[target].append((idx_query,idx_target,len(new_word),score))
idx_target_end += 1
idx_query_end += 1
if saved_idx_target_end is not None:
idx_target_end = saved_idx_target_end
idx_query_end = saved_idx_query_end
## ...then the other (left)
idx_target -= 1 # Avoid repeated first word
idx_query -= 1 # Avoid repeated first word
while idx_target > -1 and idx_query > -1:
new_word = target[idx_target:idx_target_end]
score = self.compute_score_query(score,new_word,query[idx_query:idx_query_end],right=True) # Next 1 (to the left) (or total)
## Stop extending into a direction once score drops X below highest score encountered
if score <= highest_score - X:
break
elif score > highest_score:
highest_score = score
## Keep shortest highest-scoring HSP (always include original word)
if score >= highest_score:
ignore = False
if target not in d:
d[target] = [(idx_query,idx_target,len(new_word),score)]
else:
for el in d[target]:
if el[3] == score:
diff = len(new_word) - el[2]
if el[0] - idx_query == diff and el[1] - idx_target == diff:
ignore = True
break
if not ignore:
d[target].append((idx_query,idx_target,len(new_word),score))
idx_target -= 1
idx_query -= 1
return d
#if pssm is not None:
# Repeat everything else
#d['SEQWENCE'] = [(1, 2, 4, 13)]
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        total = 0
        for x in self.__sequences:
            total += len(x)
        return total / self.get_counts()
    def add_sequence(self, seq):
        self.__sequences.append(seq.replace('*', ''))
def read_fasta(self, filename):
with open(filename, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
# self.__sequences.append(seq)
self.add_sequence(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.add_sequence(seq)
#self.__sequences.append(seq)
    def get_abs_frequencies(self):
        counts = {}
        for amino in self.__sequences:
            for single in amino:
                if single not in counts:
                    counts[single] = 1
                else:
                    counts[single] += 1
        return counts

    def get_av_frequencies(self):
        counts = self.get_abs_frequencies()
        total = 0
        for x in counts.values():
            total += x
        new_dict = {}
        for key, value in counts.items():
            new_dict[key] = value / total
        return new_dict
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
import os
from Bio.PDB.Polypeptide import PPBuilder
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several
# models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
        base = os.path.basename(path)
        structure_id = os.path.splitext(base)[0].upper()
        self.structure = self.CIF_PARSER.get_structure(structure_id, path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
return len(list(self.structure.get_chains()))
    def get_chain_by_id(self, chain_id):
        # pass a default to next(); without it a missing chain raises
        # StopIteration and the None check below can never fire
        chain = next(filter(lambda chain: chain.get_id() == chain_id, self.structure.get_chains()), None)
        if chain is None:
            raise ValueError("Chain with id {} does not exist".format(chain_id))
        return chain
def get_filtered_residues_of_chain(self, chain_id):
chain = self.get_chain_by_id(chain_id)
residues = [residue for residue in chain.get_residues() if residue.id[0] == ' '] # het flag
return residues
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
ppb = PPBuilder()
chain = self.get_chain_by_id(chain_id)
return ppb.build_peptides(chain)[0].get_sequence()
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
chain = self.get_chain_by_id(chain_id)
return sum(1 for residue in chain if residue.get_resname() == "HOH")
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
residue_1 = self.get_chain_by_id(chain_id_1)[index_1]
residue_2 = self.get_chain_by_id(chain_id_2)[index_2]
residue_1_coord = residue_1["CA"].get_coord()
residue_2_coord = residue_2["CA"].get_coord()
return int(np.linalg.norm(residue_1_coord-residue_2_coord))
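
    # Example (made-up coordinates): CA atoms at (0, 0, 0) and (3, 4, 0)
    # give np.linalg.norm(...) = 5.0 Angstrom, which int() truncates to 5.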
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
residues = self.get_filtered_residues_of_chain(chain_id)
size = len(residues)
contact_map = np.full((size, size), np.nan, dtype=np.float32)
row = col = 0
for r_a in residues:
seq_a = r_a.get_id()[1]
for r_b in residues:
seq_b = r_b.get_id()[1]
if np.isnan(contact_map[(row,col)]):
distance = self.get_ca_distance(chain_id, seq_a, chain_id, seq_b)
contact_map[(row,col)] = contact_map[(col,row)] = distance
col += 1
row += 1
col = 0
        return contact_map.astype(int)
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
residues = self.get_filtered_residues_of_chain(chain_id)
bfactors = np.array([0] * len(residues), dtype=np.float32)
for idx, residue in enumerate(residues):
atom_bfactors = [atom.get_bfactor() for atom in residue.get_atoms()]
bfactors[idx] = np.nanmean(atom_bfactors)
zscores = np.array([0] * len(residues), dtype=np.int64)
mean_bfactor = np.nanmean(bfactors)
std_bfactor = np.nanstd(bfactors)
for idx, bfactor in enumerate(bfactors):
zscores[idx] = int((bfactor - mean_bfactor) / std_bfactor)
return zscores
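
    # Illustration (made-up numbers): residue means [10.0, 20.0, 30.0] have
    # mean 20.0 and population std ~8.165, so the standard scores are about
    # [-1.22, 0.0, 1.22], which int() maps to [-1, 0, 1].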
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix = self.create_score_matrix()
self.alignment = self.get_alignment()
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
alignment = self.alignment
alignment1 = alignment[0]
alignment2 = alignment[1]
if len(alignment1) > 0 and len(alignment2) > 0:
return True
else:
return False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
seq1 = self.string2
seq2 = self.string1
matrix = self.score_matrix
alignment1, alignment2 = '', ''
index1,index2 = [],[]
        i, j = np.unravel_index(matrix.argmax(), matrix.shape)
while matrix[i][j]!=0:
index1.append(i-1)
index2.append(j-1)
score_current = matrix[i][j]
score_diagonal = matrix[i - 1][j - 1]
score_up = matrix[i][j - 1]
score_left = matrix[i - 1][j]
if score_current == score_left + self.gap_penalty:
alignment1 += seq1[i - 1]
alignment2 += '-'
i -= 1
elif score_current == score_diagonal + self.match_score(seq1[i - 1], seq2[j - 1]):
alignment1 += seq1[i - 1]
alignment2 += seq2[j - 1]
i -= 1
j -= 1
elif score_current == score_up + self.gap_penalty:
alignment1 += '-'
alignment2 += seq2[j - 1]
j -= 1
alignment1 = alignment1[::-1]
alignment2 = alignment2[::-1]
self.index1 = index1
self.index2 = index2
return (alignment2,alignment1)
def is_residue_aligned(self, number, index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if number == 1:
indexes = self.index2
else:
indexes = self.index1
if index in indexes:
return True
else:
return False
def create_score_matrix(self):
matrix = self.score_matrix
rows = matrix.shape[0]
cols = matrix.shape[1]
gap_penalty = self.gap_penalty
seq1 = self.string2
seq2 = self.string1
for i in range(0, rows):
matrix[i][0] = 0
for j in range(0, cols):
matrix[0][j] = 0
for i in range(1, rows):
for j in range(1, cols):
check = matrix[i - 1][j - 1] + self.match_score(seq1[i - 1], seq2[j - 1])
remove = matrix[i - 1][j] + gap_penalty
add = matrix[i][j - 1] + gap_penalty
matrix[i][j] = max(check, remove, add,0)
return np.array(matrix)
def match_score(self, ck1, ck2):
if ck1 == '-' or ck2 == '-':
return self.gap_penalty
else:
return self.substitution_matrix[ck1][ck2]
<file_sep>import numpy as np
import copy
from itertools import combinations
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db=[]
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [sequence for sequence in self.db if word in sequence]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
'''test_json = 'blast_test.json'
with Path("/home/marcelloferoce/Scrivania/pp1ss19exercise5-exercise-ge68mij/tests", test_json).open('r') as json_file:
json_data = json.load(json_file)
sm=np.array(json_data['sub_matrix'], dtype=np.int64)
query = "MGPRARPAFLLLMLLQTAVL"
target = "MGELMAFLLPLIIVLMVKHS"
ex = getHspOneHitQuery(startingindex_query=8, startingindex_target=6, sm=sm, X=5, query=query,
target=target)'''
wordset = set([])
sumseq = 0
for sequence in self.db:
cwordset = set([])
for i in range(len(sequence) - 2):
word = sequence[i:i + 3]
if word not in cwordset:
sumseq += 1
wordset.add(word)
cwordset.add(word)
summdiffword = 0
for sequence in self.db:
diffwordset = set([])
for i in range(len(sequence) - 2):
diffwordset.add(sequence[i:i + 3])
summdiffword += len(diffwordset)
return tuple(list([len(self.db), len(wordset), round(summdiffword / len(self.db)), round(sumseq / len(wordset))]))
'''diffwords=[]
wordsperseq=[]
sequencesperword=[]
for seq in self.db:
diffwordseq = []
for i in range(len(seq)-2):
word=seq[i:i+3]
if word not in diffwords:
diffwords.append(word)
if word not in diffwordseq:
diffwordseq.append(word)
wordsperseq.append(len(diffwordseq))
for word in diffwords:
numsequences=0
for seq in self.db:
if word in seq:
numsequences+=1
sequencesperword.append(numsequences)
return (len(self.db),len(diffwords),round(sum(wordsperseq)/len(wordsperseq)),round(sum(sequencesperword)/len(sequencesperword)))'''
def getAllWords(ALPHABET):
words=[]
for letter in ALPHABET:
for letter2 in ALPHABET:
for letter3 in ALPHABET:
word=letter+letter2+letter3
words.append(word)
return words
def getScoreTwoWords(orword, word, sm):
return sm[AA_TO_INT[orword[0]]][AA_TO_INT[word[0]]]+sm[AA_TO_INT[orword[1]]][AA_TO_INT[word[1]]]+sm[AA_TO_INT[orword[2]]][AA_TO_INT[word[2]]]
def getSameWordScore(orword, sm):
return sm[AA_TO_INT[orword[0]]][AA_TO_INT[orword[0]]]+sm[AA_TO_INT[orword[1]]][AA_TO_INT[orword[1]]]+sm[AA_TO_INT[orword[2]]][AA_TO_INT[orword[2]]]
def getStartingIndices(orword, seq):
numoc=seq.count(orword)
startingindices=[]
for i in range(len(seq) - 2):
word = seq[i:i + 3]
if word == orword:
startingindices.append(i)
return startingindices
def getScoreTwoSequences(queryword, targetword, sm):
score=0
for i in range(len(queryword)):
score+=sm[AA_TO_INT[queryword[i]]][AA_TO_INT[targetword[i]]]
return score
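
# Example (BLOSUM62-like values): getScoreTwoSequences("MG", "LG", sm)
# adds sm[M][L] + sm[G][G], e.g. 2 + 6 = 8 with standard BLOSUM62 entries.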
def getScoreTwoSequencesPssm(pssmsplit, word):
score=0
for i in range(len(pssmsplit)):
score+=pssmsplit[i][AA_TO_INT[word[i]]]
return score
def getScorePssmWord(splitpssm, word):
score=splitpssm[0][AA_TO_INT[word[0]]]+splitpssm[1][AA_TO_INT[word[1]]]+splitpssm[2][AA_TO_INT[word[2]]]
return score
def getHspOneHitQuery(startingindex_query, startingindex_target, sm, X, query, target):
i=copy.deepcopy(startingindex_query)+3
j=copy.deepcopy(startingindex_target)+3
maxendindexquery = copy.deepcopy(i)
maxendindextarget =copy.deepcopy(j)
maxscore = getScoreTwoSequences(query[startingindex_query:i], target[startingindex_target:j], sm)
maxquery=query[startingindex_query:i]
    stop = False
while i <=len(query) and j<=len(target) and not stop:
queryword = query[startingindex_query:i]
targetword = target[startingindex_target:j]
score = getScoreTwoSequences(queryword, targetword, sm)
if score>maxscore:
maxscore=copy.deepcopy(score)
maxendindexquery=copy.deepcopy(i)
maxendindextarget = copy.deepcopy(j)
maxquery = copy.deepcopy(queryword)
elif maxscore-score>=X:
stop=True
i+=1
j+=1
i = startingindex_query
j = startingindex_target
newstartindex_query=copy.deepcopy(i)
newstartindex_target = copy.deepcopy(j)
stop = False
while i >=0 and j>=0 and not stop:
queryword = query[i:maxendindexquery]
targetword = target[j:maxendindextarget]
score = getScoreTwoSequences(queryword, targetword, sm)
if score > maxscore:
maxscore = copy.deepcopy(score)
maxquery = copy.deepcopy(queryword)
newstartindex_query = copy.deepcopy(i)
newstartindex_target = copy.deepcopy(j)
elif maxscore - score >= X:
stop = True
i -= 1
j -= 1
return (newstartindex_query,newstartindex_target,len(maxquery),int(maxscore))
def getHspOneHitPssm(startingindex_pssm, startingindex_target, pssm, X, target):
i = copy.deepcopy(startingindex_pssm) + 3
j = copy.deepcopy(startingindex_target) + 3
maxindexpssm = copy.deepcopy(i)
maxendindextarget = copy.deepcopy(j)
    # score the seed word itself (PSSM rows startingindex_pssm .. startingindex_pssm+2)
    maxscore = getScorePssmWord(pssm[startingindex_pssm:i], target[startingindex_target:j])
maxpssm = pssm[startingindex_pssm:i]
stop = False
while i <= len(pssm) and j <= len(target) and not stop:
pssmword = pssm[startingindex_pssm:i]
targetword = target[startingindex_target:j]
score = getScoreTwoSequencesPssm(pssmword, targetword)
if score > maxscore:
maxscore = copy.deepcopy(score)
maxindexpssm = copy.deepcopy(i)
maxendindextarget = copy.deepcopy(j)
maxpssm = copy.deepcopy(pssmword)
elif maxscore - score >= X:
stop = True
i += 1
j += 1
i = startingindex_pssm
j = startingindex_target
newstartindex_query = copy.deepcopy(i)
newstartindex_target = copy.deepcopy(j)
stop = False
while i >= 0 and j >= 0 and not stop:
pssmword = pssm[i:maxindexpssm]
targetword = target[j:maxendindextarget]
score = getScoreTwoSequencesPssm(pssmword, targetword)
if score > maxscore:
maxscore = copy.deepcopy(score)
maxpssm = copy.deepcopy(pssmword)
newstartindex_query = copy.deepcopy(i)
newstartindex_target = copy.deepcopy(j)
elif maxscore - score >= X:
stop = True
i -= 1
j -= 1
return (newstartindex_query, newstartindex_target, len(maxpssm), int(maxscore))
def extendHsprLeft(hspl, hspr, sm, X, query, target):
i = hspr[0]
j = hspr[1]
newstartindex_query = copy.deepcopy(i)
newstartindex_target = copy.deepcopy(j)
stop = False
overlap=False
maxendindexquery=hspr[0]+hspr[2]-1
maxendindextarget=hspr[1]+hspr[2]-1
querysequence=query[newstartindex_query:maxendindexquery+1]
targetsequence=target[newstartindex_target:maxendindextarget+1]
maxscore = getScoreTwoSequences(querysequence,targetsequence , sm)
while i >= 0 and j >= 0 and not stop:
if (hspl[1] + hspl[2] - 1) >= j:
overlap=True
break
queryword = query[i:maxendindexquery+1]
targetword = target[j:maxendindextarget+1]
score = getScoreTwoSequences(queryword, targetword, sm)
if score > maxscore:
maxscore = copy.deepcopy(score)
elif maxscore - score >= X:
stop = True
i -= 1
j -= 1
if overlap:
score=getScoreTwoSequences(query[hspl[0]:maxendindexquery+1], target[hspl[1]:maxendindextarget+1], sm)
lenhsp=maxendindexquery-hspl[0]+1
return [(hspl[0],hspl[1],lenhsp,score)]
else:
return []
def extendHspRight(hsp, sm, X, query, target):
i = copy.deepcopy(hsp[0]+ hsp[2]-1)
j = copy.deepcopy(hsp[1] + hsp[2]-1)
querysequence=query[hsp[0]:i+1]
targetsequence=target[hsp[1]:j+1]
maxscore = getScoreTwoSequences(querysequence, targetsequence, sm)
maxquery = query[hsp[0]:i+1]
stop = False
while i < len(query) and j < len(target) and not stop:
queryword = query[hsp[0]:i+1]
targetword = target[hsp[1]:j+1]
score = getScoreTwoSequences(queryword, targetword, sm)
if score > maxscore:
maxscore = copy.deepcopy(score)
maxquery = copy.deepcopy(queryword)
elif maxscore - score >= X:
stop = True
i += 1
j +=1
return (hsp[0],hsp[1],len(maxquery),maxscore)
def extendHsplRight(hspl, hspr, sm, X, query, target):
i = hspl[0]+hspl[2]-1
j = hspl[1]+hspl[2]-1
stop = False
overlap = False
querysequence=query[hspl[0]:i+1]
targetsequence=target[hspl[1]:j+1]
maxscore = getScoreTwoSequences(querysequence,targetsequence, sm)
while i <len(query) and j <len(target) and not stop:
if j>=hspr[1]:
overlap = True
break
queryword = query[hspl[0]:i+1]
targetword = target[hspl[1]:j+1]
score = getScoreTwoSequences(queryword, targetword, sm)
if score > maxscore:
maxscore = copy.deepcopy(score)
elif maxscore - score >= X:
stop = True
i += 1
j += 1
if overlap:
score = getScoreTwoSequences(query[hspl[0]:hspr[0]+hspl[2]], target[hspl[1]:hspr[1]+hspl[2]], sm)
lenhsp = hspr[0]+hspr[2]-hspl[0]+1
return [(hspl[0], hspl[1], lenhsp, score)]
else:
return [hspl, hspr]
def extendHspLeft(hsp, sm, X, query, target):
i = hsp[0]
j = hsp[1]
newstartindex_query = copy.deepcopy(i)
newstartindex_target = copy.deepcopy(j)
stop = False
maxendindexquery = hsp[0] + hsp[2]
maxendindextarget = hsp[1] + hsp[2]
maxscore = getScoreTwoSequences(query[newstartindex_query:maxendindexquery],
target[newstartindex_target:maxendindextarget], sm)
while i >= 0 and j >= 0 and not stop:
queryword = query[i:maxendindexquery+1]
targetword = target[j:maxendindextarget+1]
score = getScoreTwoSequences(queryword, targetword, sm)
if score > maxscore:
maxscore = copy.deepcopy(score)
newstartindex_query=copy.deepcopy(i)
newstartindex_target=copy.deepcopy(j)
elif maxscore - score >= X:
stop = True
i -= 1
j -= 1
lenhsp=maxendindexquery-newstartindex_query
return (newstartindex_query,newstartindex_target,lenhsp,maxscore)
def extendHsps(hspl,hspr,sm, X, query, target):
hsps=extendHsprLeft(hspl,hspr,sm, X, query, target)
if len(hsps)==1:
hsp=extendHspRight(hsps[0],sm, X, query, target)
return [hsp]
return []
'''else:
hsps = extendHsplRight(hspl, hspr,sm, X, query, target)
if len(hsps) == 1:
hsp=extendHspLeft(hsps, sm, X, query, target)
overlapped = True
if overlapped:
return [hsp]
return [hspl,hspr]'''
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.allwords=getAllWords(ALPHABET)
self.sm=substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words=[]
        if sequence is not None:
for i in range(len(sequence)-2):
orword = sequence[i:i + 3]
for word in self.allwords:
score = getScoreTwoWords(orword, word,self.sm)
if score >= T and word not in words:
words.append(word)
else:
for i in range(len(pssm)-2):
splitpssm = pssm[i:i + 3]
for word in self.allwords:
score=getScorePssmWord(splitpssm,word)
if score >= T and word not in words:
words.append(word)
return words
#return ['AAA', 'YYY']
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        if query is not None:
wordsoverthreshold=self.get_words(sequence=query,T=T)
for wordthres in wordsoverthreshold:
for i in range(len(query) - 2):
queryword=query[i:i+3]
if getScoreTwoSequences(queryword,wordthres,sm=self.sm)>=T:
for target in blast_db.db:
if wordthres in target:
starting_indices = getStartingIndices(wordthres, target)
for startingindex_target in starting_indices:
hsp = getHspOneHitQuery(i, startingindex_target, self.sm, X, query, target)
if hsp[3] >= S:
try:
d[target]
except:
d[target] = []
if hsp not in d[target]:
d[target].append(hsp)
else:
wordsoverthreshold = self.get_words(sequence=None,pssm=pssm,T=T)
for wordthres in wordsoverthreshold:
for i in range(len(pssm) - 2):
pssmsplit = pssm[i:i + 3]
if getScoreTwoSequencesPssm(pssmsplit,wordthres)>=T:
for target in blast_db.db:
if wordthres in target:
starting_indices = getStartingIndices(wordthres, target)
for startingindex_target in starting_indices:
hsp=getHspOneHitPssm(i,startingindex_target,pssm,X,target)
if hsp[3] >= S:
try:
d[target]
except:
d[target] = []
if hsp not in d[target]:
d[target].append(hsp)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
targets=blast_db.db
        if query is not None:
querywords=[None]*(len(query)-2)
for i in range(len(query) - 2):
querywords[i]=query[i:i + 3]
wordstargets={}
wordsoverthreshold = self.get_words(sequence=query, T=T)
for target in targets:
wordstargets[target] = []
for word in wordsoverthreshold:
if word in target:
for startingindex in getStartingIndices(word, target):
for i in range(len(querywords)):
if getScoreTwoWords(querywords[i],word,self.sm)>=T:
wordstargets[target].append((i,startingindex))
wordstargetscouples={}
for target in wordstargets.keys():
wordstargetscouples[target]=[]
for indexcouple in wordstargets[target]:
for indexcouple2 in wordstargets[target]:
if abs(indexcouple[1]-indexcouple2[1])==abs(indexcouple[0]-indexcouple2[0]) and 0<abs(indexcouple[1]-indexcouple2[1])<=A:
wordstargetscouples[target].append((indexcouple,indexcouple2))
for target in wordstargetscouples.keys():
'''if target=='MAAAEEEDGGPEGPNRERGGASATFECNICLETAREAVVSVCGHLYCWPCLHQWLETRPDRQECPVCKAGISREKVVPLYGRGSQKPQDPRLKTPPRPQGQRPAPESRGGFQPFGDAGGFHFSFGVGAFPFGFFTTVFNAHEPFRRGAGVDLGQGHPASSWQDSLFLFLAIFFFFWLLSI':
cacca=""
#[(19, 92, 16, 30), (21, 94, 14, 30)]
#blast_results[key]
#[(21, 94, 14, 30)]
'''
for couple in wordstargetscouples[target]:
extendedhps=[]
                    # require the two word hits to be non-overlapping
                    # (words are 3 residues long)
                    if not (couple[1][1] in [couple[0][1], couple[0][1] + 1, couple[0][1] + 2] or couple[1][0] in [couple[0][0], couple[0][0] + 1, couple[0][0] + 2]):
if couple[0][1] < couple[1][1] and couple[0][0] < couple[1][0]:
scorel=getScoreTwoWords(query[couple[0][0]:couple[0][0]+3],target[couple[0][1]:couple[0][1]+3],sm=self.sm)
hl=(couple[0][0],couple[0][1],scorel,3)
scorer = getScoreTwoWords(query[couple[1][0]:couple[1][0] + 3],target[couple[1][1]:couple[1][1] + 3], sm=self.sm)
hr = (couple[1][0], couple[1][1], scorer, 3)
extendedhps = extendHsps(hl, hr, self.sm, X, query, target)
elif couple[0][1] > couple[1][1] and couple[0][0] > couple[1][0]:
scorer = getScoreTwoWords(query[couple[0][0]:couple[0][0] + 3],
target[couple[0][1]:couple[0][1] + 3], sm=self.sm)
hr= (couple[0][0], couple[0][1], scorer, 3)
scorel = getScoreTwoWords(query[couple[1][0]:couple[1][0] + 3],
target[couple[1][1]:couple[1][1] + 3], sm=self.sm)
hl = (couple[1][0], couple[1][1], scorel, 3)
extendedhps = extendHsps(hl, hr, self.sm, X, query, target)
for hsp in extendedhps:
if hsp[3] >= S:
try:
d[target]
except:
d[target] = []
if hsp not in d[target]:
d[target].append(hsp)
return d
<file_sep>import numpy as np
import sys
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if sequences is None:
raise TypeError("Sequences is null")
if not sequences:
raise TypeError("Length should be >= 1")
length = len(sequences[0])
for seq in sequences:
for aa in seq:
if aa not in ALPHABET:
raise TypeError("Invalid amino acid found")
if len(seq) != length:
raise TypeError("Sequences with different length found")
self.sequences = sequences
self.count = np.zeros((len(self.get_primary_sequence()), 20))
index = 0
for j in range(len(self.sequences[0])):
if self.sequences[0][j] == '-':
continue
for k in range(len(self.sequences)):
if self.sequences[k][j] != '-':
self.count[index][AA_TO_INT[self.sequences[k][j]]] += 1
index += 1
np.set_printoptions(threshold=sys.maxsize)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if redistribute_gaps:
self.count = self.count_with_gaps(bg_matrix)
if use_sequence_weights:
self.count = self.count_with_weights()
if add_pseudocounts:
self.count = self.compute_pseudocounts()
#print(self.count)
pssm = np.zeros((len(self.count), 20))
for i in range(len(pssm)):
aaSum = 0
for j in range(len(pssm[0])):
aaSum += self.count[i][j]
for k in range(len(pssm[0])):
if bg_matrix is None:
freq = 0.05
else:
m = np.sum(bg_matrix, axis = 0)
freq = m[AA_TO_INT[ALPHABET[k]]]
pssm[i][k] = float(self.count[i][k] / aaSum )
pssm[i][k] = float(pssm[i][k] / freq)
s = float(2 * np.log2(pssm[i][k]))
if np.isinf(s):
pssm[i][k] = -20
else:
pssm[i][k] = s
        return np.rint(pssm).astype(np.int64)
    def compute_pseudocounts(self, bg_matrix=None):
        # NOTE: the full pseudocount computation (substitution-frequency
        # weighting) was never finished; this returns the background-scaled
        # counts only, so get_pssm at least receives a usable array.
        count = np.zeros((len(self.get_primary_sequence()), len(self.count[0])))
        index = 0
        for j in range(len(self.sequences[0])):
            if self.sequences[0][j] == '-':
                continue
            for k in range(len(self.sequences)):
                if self.sequences[k][j] != '-':
                    count[index][AA_TO_INT[self.sequences[k][j]]] += 1
            index += 1
        for i in range(len(count)):
            for j in range(len(count[0])):
                if bg_matrix is None:
                    backFreq = 0.05
                else:
                    backFreq = np.sum(bg_matrix, axis=0)[j]
                count[i][j] = count[i][j] / backFreq
        return count
def count_with_gaps(self, bg_matrix=None):
self.count = np.zeros((len(self.get_primary_sequence()), 20))
index = 0
for j in range(len(self.sequences[0])):
if self.sequences[0][j] == '-':
continue
for k in range(len(self.sequences)):
if self.sequences[k][j] != '-':
self.count[index][AA_TO_INT[self.sequences[k][j]]] += 1
index += 1
countGaps = np.zeros((len(self.get_primary_sequence()), 21))
index = 0
for j in range(len(self.sequences[0])):
if self.sequences[0][j] == '-':
continue
for k in range(len(self.sequences)):
countGaps[index][AA_TO_INT[self.sequences[k][j]]] += 1
index += 1
for i in range(len(self.count)):
for k in range(len(self.count[0])):
freq = 0
if bg_matrix is None:
freq = 0.05
else:
m = np.sum(bg_matrix, axis=0)
freq = m[AA_TO_INT[ALPHABET[k]]]
countGaps[i][k] = float(self.count[i][k] + (countGaps[i][-1] * freq))
return countGaps
def count_with_weights(self):
count = np.zeros((len(self.get_primary_sequence()), len(self.count[0])))
weights = self.get_sequence_weights()
index = 0
for j in range(len(self.sequences[0])):
if self.sequences[0][j] == '-':
continue
for k in range(len(self.sequences)):
if self.sequences[k][j] != '-':
count[index][AA_TO_INT[self.sequences[k][j]]] += weights[k]
index += 1
return count
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-", "")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
        matrix = np.zeros((len(self.sequences) + 1, len(self.sequences[0])), dtype=np.float64)
        weights = np.zeros(len(self.sequences), dtype=np.float64)
for i in range(len(self.sequences)):
seq = self.sequences[i]
s = 0
for k in range(len(seq)):
r = 0
rList = []
for j in range(len(self.sequences)):
aa = self.sequences[j][k]
if aa not in rList:
r += 1
rList.append(aa)
if self.sequences[j][k] == self.sequences[i][k]:
s += 1
if k <= len(self.sequences[0]) - 1:
matrix[i][k] = s
s = 0
matrix[len(self.sequences)][k] = r
        for i in range(len(matrix)):
            w = 0
            for j in range(len(matrix[i])):
                # only columns with more than one distinct residue (r > 1)
                # contribute to the position-based weights
                if matrix[len(self.sequences)][j] > 1 and i < len(self.sequences):
                    matrix[i][j] = np.reciprocal(np.multiply(matrix[i][j], matrix[len(self.sequences)][j]))
                    w += matrix[i][j]
            if i < len(self.sequences):
                weights[i] = w
        return weights.astype(np.float64)
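
    # Worked example (position-based weights, assuming the convention that
    # only columns with more than one distinct residue count): for the MSA
    # ["AA", "AA", "AC"], column 1 is skipped (r = 1) and column 2 has
    # r = 2 with s = 2, 2, 1 -> weights 1/(2*2), 1/(2*2), 1/(2*1)
    # = [0.25, 0.25, 0.5].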
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        # count the number of distinct residues r per column; the estimate
        # is the mean of r over all columns
        obs = np.zeros((len(self.sequences[0]),))
        for k in range(len(self.sequences[0])):
            rList = []
            for j in range(len(self.sequences)):
                aa = self.sequences[j][k]
                if aa not in rList:
                    rList.append(aa)
            obs[k] = len(rList)
        num_obs = np.sum(obs) / len(self.sequences[0])
        return np.float64(num_obs)
<file_sep>import numpy as np
import sys
import math
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.validateMSA(sequences)
self.msa = sequences
self.weight_matrix = None
self.num_ind_obs = None
self.seq_weights = None
self.calc_w_matrix_and_ind_obs()
self.calcSeqWeights()
def validateMSA(self, sequences):
hasMultipleSequences = len(sequences) > 0
sameLength = len(set(map(len, sequences))) in (0, 1)
onlyValidChars = all(map(self.hasOnlyValidCharacters, sequences))
if not(hasMultipleSequences and sameLength and onlyValidChars):
raise TypeError("Invalid MSA!")
def hasOnlyValidCharacters(self, sequence):
return all(aa in ALPHABET for aa in sequence)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# Calculate array of background frequencies
in_use_bg_freq_arr = self.calculate_bg_freq(bg_matrix)
in_use_bg_matrix = self.get_bg_matrix(bg_matrix)
print("Background frequncies used", in_use_bg_freq_arr)
# Rows -> number of amino acids, columns -> 20 amino acids plus one column for gaps
pssm = np.zeros((len(self.msa[0]), 21))
self.count_frequencies(pssm, use_sequence_weights)
print("After counting", pssm)
pssm = self.redistribute_gaps(pssm, in_use_bg_freq_arr, redistribute_gaps)
print("After redistributing", pssm)
if add_pseudocounts:
pssm = self.add_pseudo_counts(pssm, beta, in_use_bg_matrix, in_use_bg_freq_arr)
print("After adding pseudocounts", pssm)
self.normalize_to_relative_frequencies(pssm)
print("After normalization", pssm)
pssm = self.divide_by_background_frequencies(pssm, in_use_bg_freq_arr)
print("After division by bg_freq", pssm)
pssm = self.calculate_log_score(pssm)
print("After calculating log score", pssm)
pssm = self.remove_gap_rows(pssm)
print("After removing gap rows", pssm)
pssm = np.rint(pssm).astype(np.int64)
print("After rounding", pssm)
return pssm
    def get_bg_matrix(self, bg_matrix):
        # "is None" rather than "== None": an element-wise numpy comparison
        # would be ambiguous in a boolean context.
        if bg_matrix is None:
            return np.full((20, 20), 1 / 400)
        else:
            return bg_matrix
    def calculate_bg_freq(self, bg_matrix):
        if bg_matrix is None:
            return np.full(20, 1 / 20)
        else:
            return np.array(bg_matrix).sum(axis=0)
def redistribute_gaps(self, pssm, bg_freq_arr, redistribute_gaps):
gap_counts = pssm[:,-1]
pssm_without_gaps = pssm[:, 0:-1]
if redistribute_gaps:
for row in range(self.get_size()[1]):
pssm_without_gaps[row,:] = pssm_without_gaps[row,:] + bg_freq_arr * gap_counts[row]
return pssm_without_gaps
def add_pseudo_counts(self, pssm, beta, bg_matrix, bg_freq_arr):
pseudo_matrix = np.zeros(pssm.shape)
for row in range(pssm.shape[0]):
for col in range(pssm.shape[1]):
pseudo_matrix[row, col] = sum(freq / bg_freq_arr[idx] * bg_matrix[idx][col] for idx, freq in enumerate(pssm[row,:]))
return (pssm * (self.get_number_of_observations() - 1) + pseudo_matrix * beta) / ((self.get_number_of_observations() - 1) + beta)
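    # The blend above is the standard PSI-BLAST-style adjustment
    # F = (alpha * f + beta * g) / (alpha + beta) with alpha = N - 1,
    # where N is the estimated number of independent observations and
    # g are the substitution-derived pseudocount frequencies.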
def count_frequencies(self, pssm, use_sequence_weights):
rows = self.get_size()[1]
if use_sequence_weights:
weights = self.seq_weights
else:
weights = np.ones(self.get_size()[0])
for pos in range(rows):
posSeq = ''.join(seq[pos] for seq in self.msa)
for seq_id, aa in enumerate(posSeq) :
col = AA_TO_INT[aa]
pssm[pos, col] += weights[seq_id]
def normalize_to_relative_frequencies(self, pssm):
row_sums = pssm.sum(axis=1)
for row, row_sum in enumerate(row_sums):
pssm[row,:] = pssm[row,:] / row_sum
def divide_by_background_frequencies(self, pssm, bg_freq_arr):
pssm = pssm / bg_freq_arr
return pssm
def calculate_log_score(self, pssm):
pssm = np.log2(pssm) * 2
np.place(pssm, np.isneginf(pssm), -20)
return pssm
def remove_gap_rows(self, pssm):
        mask = [char != '-' for char in self.msa[0]]
return pssm[mask,:]
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.msa), len(self.msa[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.msa[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.seq_weights
def calc_w_matrix_and_ind_obs(self):
seq_length = len(self.msa[0])
weight_matrix = np.zeros((seq_length, len(self.msa) + 1))
ind_observations = 0
for pos, _ in enumerate(self.msa[0]):
posSeq = ''.join(seq[pos] for seq in self.msa)
count = Counter(posSeq)
r = len(count)
ind_observations += r
weight_matrix[pos, -1] = r
for idx, aa in enumerate(posSeq):
equalOccurences = count[aa]
weight_matrix[pos, idx] = 1 / (r * equalOccurences)
self.weight_matrix = weight_matrix
self.num_ind_obs = ind_observations / seq_length
def calcSeqWeights(self):
weights = np.zeros(len(self.msa))
rows, columns = self.weight_matrix.shape
# Don't need the r column
for col in range(columns - 1):
weights[col] = sum(self.weight_matrix[row, col] for row in range(rows) if self.weight_matrix[row, -1] != 1)
self.seq_weights = weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.num_ind_obs
if __name__ == '__main__':
valid_msa = [
"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLK------------VAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----------MGEIAVGLNKGHQVTKKA----------GTPRPSRRKGFLSQRVKKVRAVVREVAGWAPYERRVMELLKVGKD---KRALKMCKRKLGTHMRGKKKREEMAGVLRKMQAASKGE---------",
"----MAPKQPNTGLFVGLNKGHIVTKKE----------LAPRPSDRKGKTSKRTHFVRNLIREVAGFAPYEKRITELLKVGKD---KRALKVRQEKVGHSQESKEEER--GDVQCSP--------PDEGWWWY",
"---------MAPGLVVGLNKGKVLTKRQ----------LPERPSRRKGQLSKRTSFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MGVQYKLAVGLGKGHKVTKNE----------YKPRPSRRKGALSKHTRFVRDLIREVCGFAPFERRAMELLKVSKD---KRALKFIKKRLGTHLRGKRKRDELSNVLVAQRKAAAHKEKTEHK---",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------GKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"---------MAPGLVVGLNKGKTLTKRQ----------LPERPSRRKGHLSKRTAFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MAIRYPMAVGLNKGHKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCAFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGYKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MVVRYPMAVGLNKGHKVTKNV----------SKPKHSRRRGRLTKHAKFARDLIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNTLAAMRKAAAKKE--------",
"-------MAIRYPMAVGLKKGHPVTKNV----------TKPKHSRRGGRLTKHSKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNILAAMRKAAAKKE--------",
"---MAKEAPAKTGLAVGLNKGHKTTARV----------VKPRVSRTKGHLSKRTAFVREVVKEVAGLAPYERRVIELLRNSKD---KRARKLAKKRLGTFGRAKRKVDELQRVIAESRRAH------------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRSRLTNHTKFVRDMIREVCGFAPYERRAMELLKVSKS---KRALKFIKKRVGTHIRAKRKREELSNVLAAMEEAAAKKD--------",
"-----MSGPGIEGLAVGLNKGHAATQLP----------VKQRQNRHKGVASKKTKIVRELVREITGFAPYERRVLEMLRISKD---KRALKFLKRRIGTHRRAKGKREELQNVIIAQRKAHK-----------",
"--------MAKSGIAAGVNKGRKTTAKE----------VAPKISYRKGASSQRTVFVRSIVKEVAGLAPYERRLIELIRNAGE---KRAKKLAKKRLGTHKRALRKVEEMTQVIAESRRH-------------",
"-------MAVRYELAIGLNKGHKTSKIRNVKYTGDKKVKGLRGSRLKNIQTRHTKFMRDLVREVVGHAPYEKRTMELLKVSKD---KRALKFLKRRLGTHIRAKRKREELSNILTQLRKAQTHAK--------",
"-------MAVKTGIAIGLNKGKKVTQMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"-------MTVKTGIAIGLNKGKKVTSMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"---------MAKGQAVGINKGFITTQLE-------KKLQKHSAVQRKGKLGKRVALVRQVIREVTGFAPYEKRIIELIKAGSAKDSKKATKIARKRLGTHRRAKVKKALLEEAVRAQRKK-------------",
"MSSAATKPVKRSGIIKGFNKGHAVAKRT------------VTSTFKKQVVTKRVAAIRDVIREISGFSPYERRVSELLKSGLD---KRALKVAKKRLGSIQAGKKKRDDIANINRKASAK-------------",
"MKNA--------------------YKKVRVRYPVKRPDVKRKQRGPRAETQESRFLAAAVADEISGLSPLEKKAISLLEAKNN---NKAQKLLRKRLGSHKRAVAKVEKLARMLLEK----------------"
]
msa_val = MSA(valid_msa)
#invalid_msas = [MSA(msa) for msa in invalid_msa]
pssm = msa_val.get_pssm()
#print('Rows ' + str(rows) + ' Columns ' + str(columns))
print(pssm)<file_sep>import itertools
import json
import math
import re
from pathlib import Path
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db = list()
self.word_search_results = dict()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
if word in self.word_search_results:
return self.word_search_results[word]
else:
search_result = list(filter(lambda x: word in x, self.db))
self.word_search_results[word] = search_result
return self.word_search_results[word]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
unique_words = dict()
avg_num_words_per_sequence = 0.0
for sequence in self.db:
words = set(sequence[i: i + 3] for i in range(len(sequence) - 3 + 1))
for word in words:
unique_words[word] = unique_words.get(word, 0) + 1
if len(sequence) > 2:
avg_num_words_per_sequence += len(words)
num_seq = len(self.db)
num_of_unique_words = len(unique_words.keys())
avg_num_words_per_sequence = avg_num_words_per_sequence / num_seq
avg_num_sequences_per_word = sum(unique_words.values()) / len(unique_words.keys())
return (num_seq,
num_of_unique_words,
int(avg_num_words_per_sequence + 0.5),
int(avg_num_sequences_per_word + 0.5))
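    # Example of the return shape (hypothetical numbers): a database of 3
    # sequences sharing many 3-mers might yield (3, 120, 118, 3), i.e.
    # (num sequences, distinct words, avg words/sequence, avg sequences/word).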
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_score(self, query, target):
assert len(query) == len(target) and len(query) == 3
score = 0
for i in range(3):
score += self.substitution_matrix[AA_TO_INT[query[i]]][AA_TO_INT[target[i]]]
return score
def get_pssm_score(self, query, target):
assert target.shape == (3, 20) and len(query) == 3
score = 0
for i in range(3):
score += target[i][AA_TO_INT[query[i]]]
return score
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
global ALPHABET
results = set()
if sequence:
# Search in sequence to find similar words
for start_index in range(0, len(sequence) - 2):
words = itertools.product(ALPHABET, repeat=3)
target = sequence[start_index: start_index + 3]
for query in words:
query = ''.join(query)
if self.get_score(query, target) >= T:
results.add(query)
else:
# Search in PSSM to find similar words
for start_row in range(0, pssm.shape[0] - 2):
words = itertools.product(ALPHABET, repeat=3)
target = pssm[start_row: start_row + 3]
for query in words:
query = ''.join(query)
if self.get_pssm_score(query, target) >= T:
results.add(query)
return list(results)
def get_hsp(self, *, query, pssm, target, query_start_pos, target_start_pos, drop_threshold):
query_end_pos = query_start_pos + 3
target_end_pos = target_start_pos + 3
if query:
current_score = self.get_score(query[query_start_pos: query_end_pos],
target[target_start_pos: target_end_pos])
else:
current_score = self.get_pssm_score(target[target_start_pos: target_end_pos],
pssm[query_start_pos: query_end_pos])
hsp_length = 3
best_score = current_score
best_query_start_pos = query_start_pos
best_target_start_pos = target_start_pos
best_hsp_length = hsp_length
# First go right till score drops by drop_threshold
if query:
query_len = len(query)
else:
query_len = pssm.shape[0]
while query_end_pos < query_len and target_end_pos < len(target):
query_end_pos += 1
target_end_pos += 1
if query:
current_score += self.substitution_matrix[AA_TO_INT[query[query_end_pos - 1]]][
AA_TO_INT[target[target_end_pos - 1]]]
else:
current_score += pssm[query_end_pos - 1][AA_TO_INT[target[target_end_pos - 1]]]
if current_score > best_score:
best_score = current_score
best_hsp_length = query_end_pos - query_start_pos
if current_score <= best_score - drop_threshold:
break
query_end_pos = query_start_pos + best_hsp_length
current_score = best_score
while query_start_pos > 0 and target_start_pos > 0:
query_start_pos -= 1
target_start_pos -= 1
if query:
current_score += self.substitution_matrix[AA_TO_INT[query[query_start_pos]]][
AA_TO_INT[target[target_start_pos]]]
else:
current_score += pssm[query_start_pos][AA_TO_INT[target[target_start_pos]]]
if current_score > best_score:
best_score = current_score
best_hsp_length = query_end_pos - query_start_pos
best_query_start_pos = query_start_pos
best_target_start_pos = target_start_pos
if current_score <= best_score - drop_threshold:
break
return best_query_start_pos, best_target_start_pos, best_hsp_length, best_score
def findall(self, p, s):
'''
Yields all the positions of the pattern p in the string s.
'''
i = s.find(p)
while i != -1:
yield i
i = s.find(p, i + 1)
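    # Usage sketch: list(self.findall('ANA', 'BANANA')) -> [1, 3]; advancing
    # the search start by one position reports overlapping occurrences too.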
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:type query: str
:type blast_db: BlastDb
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
all_hsp = dict()
for word_start_pos_in_query in range(len(query) - 2 if query else pssm.shape[0] - 2):
# print('word_start_pos_in_query: %3d/%3d' % (word_start_pos_in_query, (len(query) if query else pssm.shape[0])-2))
sequence = query[word_start_pos_in_query: word_start_pos_in_query + 3] if query else None
pssm_sub = pssm if pssm is None else pssm[word_start_pos_in_query: word_start_pos_in_query + 3]
words = self.get_words(sequence=sequence,
pssm=pssm_sub,
T=T)
for word in words:
matching_sequences = blast_db.get_sequences(word)
for target in matching_sequences:
for match in self.findall(word, target): # Find all matches
# Find HSP
# new_hsp = hsp_start_pos_in_query, hsp_start_pos_in_target, hsp_length, total_score
new_hsp = self.get_hsp(query=query,
pssm=pssm,
target=target,
query_start_pos=word_start_pos_in_query,
target_start_pos=match,
drop_threshold=X)
if new_hsp[3] >= S:
updated_list = all_hsp.get(target, [])
if new_hsp not in updated_list:
updated_list.append(new_hsp)
all_hsp[target] = updated_list
return all_hsp
def get_two_hit_hsp(self,
query,
pssm,
target,
hit1_query_start_pos,
hit1_target_start_pos,
hit2_query_start_pos,
hit2_target_start_pos,
drop_threshold):
query_end_pos = hit2_query_start_pos + 3
target_end_pos = hit2_target_start_pos + 3
if query:
current_hsp_score = self.get_score(query[hit2_query_start_pos: query_end_pos],
target[hit2_target_start_pos: target_end_pos])
else:
current_hsp_score = self.get_pssm_score(target[hit2_target_start_pos: target_end_pos],
pssm[hit2_query_start_pos: query_end_pos])
# First go left from hit2 till score drops by drop_threshold or we reach sequence beginning.
if query:
query_len = len(query)
else:
query_len = pssm.shape[0]
hsp_length = 3
best_score = current_hsp_score
best_query_start_pos = hit2_query_start_pos
best_target_start_pos = hit2_target_start_pos
best_hsp_length = hsp_length
# hit2_query_start_pos and hit2_target_start_pos now acts as hsm indices
while hit2_query_start_pos > 0 and hit2_target_start_pos > 0:
hit2_query_start_pos -= 1
hit2_target_start_pos -= 1
if query:
current_hsp_score += self.substitution_matrix[AA_TO_INT[query[hit2_query_start_pos]]][
AA_TO_INT[target[hit2_target_start_pos]]]
else:
current_hsp_score += pssm[hit2_query_start_pos][AA_TO_INT[target[hit2_target_start_pos]]]
if current_hsp_score > best_score:
best_score = current_hsp_score
best_hsp_length = query_end_pos - hit2_query_start_pos
best_query_start_pos = hit2_query_start_pos
best_target_start_pos = hit2_target_start_pos
if current_hsp_score <= best_score - drop_threshold:
break
if best_query_start_pos > hit1_query_start_pos + 3:
return None
current_hsp_score = best_score
while query_end_pos < query_len and target_end_pos < len(target):
query_end_pos += 1
target_end_pos += 1
if query:
current_hsp_score += self.substitution_matrix[AA_TO_INT[query[query_end_pos - 1]]][
AA_TO_INT[target[target_end_pos - 1]]]
else:
current_hsp_score += pssm[query_end_pos - 1][AA_TO_INT[target[target_end_pos - 1]]]
if current_hsp_score > best_score:
best_score = current_hsp_score
best_hsp_length = query_end_pos - best_query_start_pos
if current_hsp_score <= best_score - drop_threshold:
break
return best_query_start_pos, best_target_start_pos, best_hsp_length, best_score
def get_all_targets_hit(self, T, blast_db, pssm, query):
hits = dict()
for word_start_pos_in_query in range(len(query) - 2 if query else pssm.shape[0] - 2):
sequence = query[word_start_pos_in_query: word_start_pos_in_query + 3] if query else None
pssm_sub = pssm if pssm is None else pssm[word_start_pos_in_query: word_start_pos_in_query + 3]
words = self.get_words(sequence=sequence, pssm=pssm_sub, T=T)
for word in words:
matching_sequences = blast_db.get_sequences(word)
for target in matching_sequences:
for match in self.findall(word, target): # Find all matches(overlap considered)
# Save only Hits
word_start_pos_in_target = match
target_hits = hits.get(target, [])
target_hits.append((word_start_pos_in_query, word_start_pos_in_target))
hits[target] = target_hits
return hits
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
# Dictionary to store target sequences as keys and hit position in query and target.
hits = self.get_all_targets_hit(T, blast_db, pssm, query)
all_hsp = dict()
for target in hits.keys():
target_hits = hits.get(target, [])
if len(target_hits) < 2:
continue
# Sort the list first
target_hits = sorted(target_hits)
# Keeps track of which hit has not been used.
is_free_to_pick = [True] * len(target_hits)
for hit1_index, hit1 in enumerate(target_hits):
for hit2_index in range(hit1_index + 1, len(target_hits)):
if not is_free_to_pick[hit2_index]:
continue
hit2 = target_hits[hit2_index]
                    # Hits must lie on the same diagonal, be non-overlapping
                    # (at least one word length apart) and at most A apart.
                    hit_aligns = (3 <= (hit2[0] - hit1[0]) <= A) and \
                                 ((hit2[1] - hit2[0]) == (hit1[1] - hit1[0]))
if not hit_aligns:
continue
# Check if hsp from hit 2 reaches hit1
# new_hsp = hsp_start_pos_in_query, hsp_start_pos_in_target, hsp_length, total_score
new_hsp = self.get_two_hit_hsp(query=query, pssm=pssm, target=target, hit1_query_start_pos=hit1[0], hit1_target_start_pos=hit1[1], hit2_query_start_pos=hit2[0], hit2_target_start_pos=hit2[1], drop_threshold=X)
                    # ToDo: if no high-scoring HSP is found for this hit pair, none
                    # will be found for later hit2 candidates on the same diagonal.
if new_hsp is None or new_hsp[3] < S:
break
                    updated_list = all_hsp.get(target, [])
                    if new_hsp not in updated_list:
                        updated_list.append(new_hsp)
all_hsp[target] = updated_list
is_free_to_pick[hit1_index] = False # This is of no use as hit1_index is visited once.
is_free_to_pick[hit2_index] = False
# Check all other hits which are now covered in this new_hsp
for hit_index in range(0, len(target_hits)):
if not is_free_to_pick[hit_index]:
continue
hit = target_hits[hit_index]
# new_hsp = hsp_start_pos_in_query, hsp_start_pos_in_target, hsp_length, total_score
                        # ToDo: check that the beginning of the hit lies inside the HSP
if new_hsp[0] <= hit[0] < new_hsp[0] + new_hsp[2] and \
new_hsp[1] <= hit[1] < new_hsp[1] + new_hsp[2] and \
((hit[1] - hit[0]) == (hit1[1] - hit1[0])):
is_free_to_pick[hit_index] = False
return all_hsp
def table_list_tuple1(data):
for key, value in data.items():
data[key] = [tuple(x) for x in value]
return data
def is_in_Release_Mode():
import sys
gettrace = getattr(sys, 'gettrace', None)
if gettrace is None:
print('No sys.gettrace')
return True
elif gettrace():
print('Hmm, Big Debugger is watching me')
return False
else:
return True
if __name__ == '__main__':
blast_db = BlastDb()
test_json = 'tests/blast_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
if is_in_Release_Mode():
for s in json_data['db_sequences']:
blast_db.add_sequence(s)
sub_matrix = np.array(json_data['sub_matrix'], dtype=np.int64)
blast = Blast(sub_matrix)
results = blast.search_two_hit(blast_db,
query=json_data['query_seq'],
T=11,
X=5,
S=30,
A=40)
two_hit_expected = table_list_tuple1(json_data['blast_hsp_two_hit_1'])
for index, key in enumerate(results.keys()):
if two_hit_expected.get(key) and set(results.get(key)) == set(two_hit_expected.get(key)):
continue
# if len(results[key]) != 2:
# continue
print(key)
print('Two Hit Actual ', results.get(key))
print('Two hit Expected ', two_hit_expected.get(
key))
print('*' * 10)
else:
blast_db.add_sequence(
'MAQALSEEEFQRMQTQLLELRTNNYQLSDELRKNGVELSSLRQKVAYLDKEFSKAQKALSKSKKAQEVEVLLSEKEMLQAKLHSQEEDFRLQNSTLMAEFSKLCSQLEQLELENRQLKEGVPGAAGPHVDGELLRLQAENTALQKNMAALQERYGKEAVRPSAVSEGQGDPPGDVLPISLSPMPLAEVELKWEMEREEKKLLWEQLQGLESSKQAETSRLQEELAKLSEKLKKKQESFCRLQTEKETLFNDSRNKIEELQQR<KEY>AN')
sub_matrix = np.array(json_data['sub_matrix'], dtype=np.int64)
blast = Blast(sub_matrix)
results = blast.search_two_hit(blast_db,
pssm=np.array(json_data['query_pssm'], dtype=np.int64),
T=11,
X=5,
S=30,
A=40)
two_hit_expected = table_list_tuple1(json_data['blast_hsp_two_hit_1'])
for index, key in enumerate(results.keys()):
if two_hit_expected.get(key) and set(results.get(key)) == set(two_hit_expected.get(key)):
continue
# if len(results[key]) != 2:
# continue
print(key)
print('Two Hit Actual ', results.get(key))
print('Two hit Expected ', two_hit_expected.get(
key))
print('*' * 10)
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
if len(sequences) == 0:
raise TypeError('List of sequences can not be empty!!!')
self.seq_length = len(sequences[0])
for sequence in sequences:
if len(sequence) != self.seq_length:
raise TypeError('All sequences must have same length')
for s in sequence:
if s not in ALPHABET:
raise TypeError('Invalid alphabet {} present in sequence'.format(sequence))
self.amino_acids_per_col = [dict() for x in range(self.seq_length)]
self.r_value_per_column = None
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
number_of_sequences = len(self.sequences)
length_of_msa = len(self.sequences[0])
return number_of_sequences, length_of_msa
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
primary_sequence = self.sequences[0]
primary_sequence = primary_sequence.replace('-', '')
return primary_sequence
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
self.get_amino_acids_columns()
self.r_value_per_column = [len(self.amino_acids_per_col[i].keys()) for i in range(self.seq_length)]
sequence_weights_matrix = np.zeros((len(self.sequences), self.seq_length))
sequence_weights = np.zeros(len(self.sequences))
self.get_sequence_weights_matrix(sequence_weights, sequence_weights_matrix)
return sequence_weights.astype(np.float64)
def get_sequence_weights_matrix(self, sequence_weights, sequence_weights_matrix):
for row_idx, sequence in enumerate(self.sequences):
for col_idx, alphabet in enumerate(sequence):
sequence_weights_matrix[row_idx, col_idx] = 1 / (self.r_value_per_column[col_idx] * self.amino_acids_per_col[col_idx][alphabet])
if self.r_value_per_column[col_idx] > 1:
sequence_weights[row_idx] += sequence_weights_matrix[row_idx, col_idx]
def get_amino_acids_columns(self):
for i in range(self.seq_length):
amino_acids_idx_i = dict()
for sequence in self.sequences:
amino_acids_idx_i[sequence[i]] = amino_acids_idx_i.get(sequence[i], 0) + 1
self.amino_acids_per_col[i] = amino_acids_idx_i
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
self.get_sequence_weights()
number_of_observations = np.float64(sum(i for i in self.r_value_per_column)) / self.seq_length
return number_of_observations
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
pssm - position-specific scoring matrix
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param use_sequence_weights: Calculate and apply sequence weights.
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if use_sequence_weights:
sequence_weights = self.get_sequence_weights()
        if bg_matrix is not None:
            bg_matrix = np.array([np.array(element) for element in bg_matrix])
        else:
            bg_matrix = np.zeros((20, 20)) + (1.0 / (20 * 20))
amino_acid_bg_frequency = bg_matrix.sum(axis=1)
amino_acid_bg_frequency = amino_acid_bg_frequency.reshape(20, 1)
observed_amino_acid_plus_gaps_counts_matrix = np.zeros((len(ALPHABET), self.seq_length))
for sequence_idx, sequence in enumerate(self.sequences):
single_amino_acid_weight = 1
if use_sequence_weights:
single_amino_acid_weight = sequence_weights[sequence_idx]
for i, sequence_alphabet in enumerate(sequence):
observed_amino_acid_plus_gaps_counts_matrix[AA_TO_INT[sequence_alphabet], i] += single_amino_acid_weight
if redistribute_gaps:
observed_amino_acid_plus_gaps_counts_matrix[0:len(ALPHABET) - 1, :] += observed_amino_acid_plus_gaps_counts_matrix[len(ALPHABET) - 1, :] * amino_acid_bg_frequency
observed_amino_acid_plus_gaps_counts_matrix = np.delete(observed_amino_acid_plus_gaps_counts_matrix, (len(ALPHABET) - 1), axis=0)
if add_pseudocounts:
observed_amino_acid_plus_gaps_counts_matrix = self.compute_pseudocounts(amino_acid_bg_frequency, beta, bg_matrix, observed_amino_acid_plus_gaps_counts_matrix)
normalized_observed_amino_acid_plus_gaps_counts_matrix = observed_amino_acid_plus_gaps_counts_matrix / np.sum(observed_amino_acid_plus_gaps_counts_matrix, axis=0)
pssm = 2 * np.log2(normalized_observed_amino_acid_plus_gaps_counts_matrix / amino_acid_bg_frequency)
pssm[np.isinf(pssm)] = -20
        pssm = pssm[:, [i for i, primary_seq_char in enumerate(self.sequences[0]) if primary_seq_char != '-']]
pssm = np.rint(pssm.T).astype(np.int64)
return pssm
def compute_pseudocounts(self, amino_acid_bg_frequency, beta, bg_matrix, observed_amino_acid_plus_gaps_counts_matrix):
pseudocounts = np.zeros(observed_amino_acid_plus_gaps_counts_matrix.shape)
for row in range(observed_amino_acid_plus_gaps_counts_matrix.shape[0]):
for column in range(observed_amino_acid_plus_gaps_counts_matrix.shape[1]):
if observed_amino_acid_plus_gaps_counts_matrix[row, column] == 0:
continue
pseudocounts[:, column] += (observed_amino_acid_plus_gaps_counts_matrix[row][column] * bg_matrix[row, :]) / amino_acid_bg_frequency[
row, 0]
N = self.get_number_of_observations()
observed_amino_acid_plus_gaps_counts_matrix = ((N - 1) * observed_amino_acid_plus_gaps_counts_matrix + pseudocounts * beta) / (N - 1 + beta)
return observed_amino_acid_plus_gaps_counts_matrix
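
if __name__ == '__main__':
    # Quick smoke test -- a sketch on a hypothetical toy alignment rather
    # than the official test data.
    toy = MSA(['ACD-', 'ACDE', 'AGDE'])
    print(toy.get_size())                    # (3, 4)
    print(toy.get_primary_sequence())        # 'ACD'
    print(toy.get_sequence_weights())        # one weight per sequence
    print(toy.get_number_of_observations())  # mean distinct residues per column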
<file_sep>import numpy as np
import json
import re
from collections import defaultdict
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.word_size = 3
        # Maps sequence -> (frozenset of its words, times the sequence was added).
        self.sequence_to_words = dict()
self.word_to_sequences = defaultdict(list)
self.num_seqs = 0
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.num_seqs += 1
subs_len = len(sequence) - (self.word_size - 1)
words = frozenset(sequence[i:i+self.word_size] for i in range(subs_len))
if sequence not in self.sequence_to_words:
self.sequence_to_words[sequence] = (words, 1)
else:
self.sequence_to_words[sequence] = (words, self.sequence_to_words[sequence][1] + 1)
for word in words:
self.word_to_sequences[word].append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return list(self.word_to_sequences[word])
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
average_words_per_seq = self.calc_average_words_per_seq()
average_seqs_per_word = self.calc_average_seqs_per_word()
stats = self.num_seqs, len(self.word_to_sequences), average_words_per_seq, average_seqs_per_word
print(stats)
return stats
def calc_average_words_per_seq(self):
return round(sum(len(words[0]) * words[1] for _, words in self.sequence_to_words.items()) / self.num_seqs)
def calc_average_seqs_per_word(self):
return round(sum(len(seq) for _, seq in self.word_to_sequences.items()) / len(self.word_to_sequences))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words_with_index = self.get_words_with_pos(sequence=sequence, pssm=pssm, T=T)
return set([w[0] for w in words_with_index])
    def get_words_with_pos(self, *, sequence=None, pssm=None, T=11):
        if sequence is not None:
            return self.get_words_for_sequence(sequence, T)
        else:
            return self.get_words_for_pssm(pssm, T)
def get_words_for_sequence(self, sequence, T):
words = []
for i in range(20):
for j in range(20):
for k in range(20):
for seq_idx in range(len(sequence) - 2):
score = self.substitution_matrix[i, AA_TO_INT[sequence[seq_idx]]]
score += self.substitution_matrix[j, AA_TO_INT[sequence[seq_idx + 1]]]
score += self.substitution_matrix[k, AA_TO_INT[sequence[seq_idx + 2]]]
if score >= T:
words.append((INT_TO_AA[i] + INT_TO_AA[j] + INT_TO_AA[k], seq_idx, score))
return set(words)
def get_words_for_pssm(self, pssm, T):
words = []
rows = pssm.shape[0]
for i in range(20):
for j in range(20):
for k in range(20):
for row_idx in range(rows - 2):
score = pssm[row_idx, i] + pssm[row_idx + 1, j] + pssm[row_idx + 2, k]
if score >= T:
words.append((INT_TO_AA[i] + INT_TO_AA[j] + INT_TO_AA[k], row_idx, score))
return set(words)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
# Get all words satisfying the initial T-score for a query or pssm
t_words = self.get_words_with_pos(sequence=query, pssm=pssm, T=T)
# Get all sequences in the BlastDB which contain one of t_words and iterate through them
hsp_dict = defaultdict(list)
for t_word_pos in t_words:
pot_targets = blast_db.get_sequences(t_word_pos[0])
for pot_target in pot_targets:
# We have now the word, the start_pos in the query and the potential target
self.find_hsp(hsp_dict, query, pssm, pot_target, t_word_pos, X, S)
return hsp_dict
def find_hsp(self, hsp_dict, query, pssm, target, t_word_pos, X, S):
# Search where the word appears in the target -> multiple possibilities
target_s_poses = self.find_start_pos_in_target(t_word_pos, target)
for target_s_pos in target_s_poses:
hsp_state = self.extend_right(query, pssm, target, t_word_pos, target_s_pos, X)
hsp_state = self.extend_left(query, pssm, target, hsp_state, X)
# Check end score
if hsp_state[4] >= S:
hsp = hsp_state[2], hsp_state[0], hsp_state[1] - hsp_state[0] + 1, hsp_state[4]
if hsp not in hsp_dict[target]:
hsp_dict[target].append(hsp)
def extend_right(self, query, pssm, target, t_word_pos, target_s_pos, X):
# end_pos is inclusive
target_e_pos = target_s_pos + 2
_, query_s_pos, cur_score = t_word_pos
query_e_pos = query_s_pos + 2
highest_score = cur_score
highest_score_target_e_pos = target_e_pos
# Extend to right, stop if target/query reaches end
        if query is not None:
q_len = len(query)
else:
q_len = pssm.shape[0]
while target_e_pos < len(target) - 1 and query_e_pos < q_len - 1:
query_e_pos += 1
target_e_pos += 1
next_aa_target = AA_TO_INT[target[target_e_pos]]
            if query is not None:
next_aa_query = AA_TO_INT[query[query_e_pos]]
cur_score += self.substitution_matrix[next_aa_query, next_aa_target]
else:
cur_score += pssm[query_e_pos, next_aa_target]
# stop if highest_score - X >= current_score
if highest_score - X >= cur_score:
break
# New high score -> Set new highscore and new high_score end position
if cur_score > highest_score:
highest_score = cur_score
highest_score_target_e_pos = target_e_pos
# Prepare best extension
target_e_pos = highest_score_target_e_pos
query_e_pos = query_s_pos + (target_e_pos - target_s_pos)
return (target_s_pos, target_e_pos, query_s_pos, query_e_pos, highest_score)
def extend_left(self, query, pssm, target, hsp_state, X):
target_s_pos, target_e_pos, query_s_pos, query_e_pos, cur_score = hsp_state
highest_score = cur_score
highest_score_target_s_pos = target_s_pos
# Extend to left, stop if target/query reaches string start
while target_s_pos > 0 and query_s_pos > 0:
query_s_pos -= 1
target_s_pos -= 1
next_aa_target = AA_TO_INT[target[target_s_pos]]
            if query is not None:
next_aa_query = AA_TO_INT[query[query_s_pos]]
cur_score += self.substitution_matrix[next_aa_query, next_aa_target]
else:
cur_score += pssm[query_s_pos, next_aa_target]
# stop if highest_score - X >= current_score
if highest_score - X >= cur_score:
break
# New high score -> Set new highscore and new high_score end position
if cur_score > highest_score:
highest_score = cur_score
highest_score_target_s_pos = target_s_pos
# Prepare best extension
target_s_pos = highest_score_target_s_pos
query_s_pos = query_e_pos - (target_e_pos - target_s_pos)
return (target_s_pos, target_e_pos, query_s_pos, query_e_pos, highest_score)
def find_start_pos_in_target(self, t_word_pos, target):
search_regex = '(?=' + t_word_pos[0] +')'
return [m.start() for m in re.finditer(search_regex, target)]
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
# Get all words satisfying the initial T-score for a query or pssm
t_words = self.get_words_with_pos(sequence=query, pssm=pssm, T=T)
        # Get all sequences in the BlastDB which contain one of t_words.
        # NOTE: the two-hit extension itself is not implemented in this file;
        # this only collects, per candidate target, the word hits found so far.
        t_sequence_to__t_words = defaultdict(set)
        for t_word_pos in t_words:
            pot_targets = blast_db.get_sequences(t_word_pos[0])
            for pot_target in pot_targets:
                t_sequence_to__t_words[pot_target].add(t_word_pos)
        return t_sequence_to__t_words
def json_data():
test_json = 'tests/blast_test.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
return json_data
if __name__ == '__main__':
sub_matrix = np.array(json_data()['sub_matrix'], dtype=np.int64)
q_seq = json_data()['query_seq']
blast_words = json_data()['blast_words']
b = Blast(sub_matrix)
result_words = b.get_words(sequence=q_seq, T=13)
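    # Hedged sanity check, assuming the fixture's 'blast_words' key holds the
    # reference word list for T=13.
    print(len(result_words), 'words;',
          'matches fixture' if set(result_words) == set(blast_words) else 'differs from fixture')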
<file_sep>import sys
from collections import Counter
from pprint import pprint
from typing import List
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = "ACDEFGHIKLMNPQRSTVWY-"
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT["-"]
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError()
self.sequences: List[str] = sequences
self.sequence_len = len(sequences[0])
for sequence in sequences:
if len(sequence) != self.sequence_len:
raise TypeError()
for letter in sequence:
if letter not in ALPHABET:
raise TypeError()
self.number_of_sequences = len(sequences)
def _count_observed_amino_acids(self, sequence_weights: List[float] = None):
"""Counts the number of observed amino acids.
Iterates over the sequence length and determines the distribution of AAs.
"""
        if sequence_weights is None:
            # Unit weight per sequence; the array is indexed by the sequence
            # index below, so its length must equal the number of sequences.
            sequence_weights = np.ones((self.number_of_sequences,))
counters = [Counter() for _ in range(self.sequence_len)]
for idx, sequence in enumerate(self.sequences):
for position in range(self.sequence_len):
counters[position][sequence[position]] += sequence_weights[idx]
return counters
def get_pssm(
self,
*,
bg_matrix: np.ndarray = None,
beta: float = 10,
use_sequence_weights=False,
redistribute_gaps=False,
add_pseudocounts=False
):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
        :param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
FIRST_VALID = 8
if bg_matrix is None:
bg_matrix = np.ones((20, 20)) * 20 ** -2
else:
bg_matrix = np.asarray(bg_matrix)
aa_frequencies = bg_matrix.sum(axis=0)
# 1. count observed amino acids
sequence_weights = None
if use_sequence_weights:
sequence_weights = self.get_sequence_weights()
counters = self._count_observed_amino_acids(sequence_weights)
# 2. construct a weight matrix out of this by just filling the information from the counters into a np array
weight_matrix = np.zeros((self.sequence_len, len(ALPHABET)))
for idx, row in enumerate(weight_matrix):
for jdx, column in enumerate(row):
weight_matrix[idx, jdx] = counters[idx][INT_TO_AA[jdx]]
np.set_printoptions(threshold=sys.maxsize)
print("construct weight matrix")
pprint(weight_matrix[FIRST_VALID, :])
# 3. redistribute gaps
if redistribute_gaps:
redistribution = (
np.expand_dims(weight_matrix[:, -1], axis=1) * aa_frequencies
)
weight_matrix[:, :-1] += redistribution
print("redistribute gaps")
pprint(weight_matrix[FIRST_VALID])
# 4. add weighted pseudo counts
if add_pseudocounts:
N = self.get_number_of_observations()
alpha = N - 1
for idx, row in enumerate(weight_matrix):
pseudo_count = 0
for jdx, freq in enumerate(row[:-1]):
background_frequency = aa_frequencies[jdx]
                    substitution_frequencies = bg_matrix[jdx]
                    pseudo_count += (
                        freq / background_frequency * substitution_frequencies
                    )
weight_matrix[idx, :-1] = (
alpha * weight_matrix[idx, :-1] + beta * pseudo_count
) / (alpha + beta)
print("add pseudo counts")
pprint(weight_matrix[FIRST_VALID])
# 5. normalize to relative frequencies
weight_matrix /= np.expand_dims(
np.sum(weight_matrix, axis=1) - weight_matrix[:, AA_TO_INT["-"]], axis=1
) # ignore the gaps
print("normalize")
pprint(weight_matrix[FIRST_VALID])
# 6. divide by background frequencies
print("divide by background frequencies")
weight_matrix[:, :-1] /= aa_frequencies
pprint(weight_matrix[FIRST_VALID, :])
# 7. logarize matrix
        weight_matrix[weight_matrix == 0] = 2 ** -10  # so log2 yields -20 instead of -inf
weight_matrix = 2 * np.log2(weight_matrix)
print("logarize")
pprint(weight_matrix[FIRST_VALID, :])
# 8. remove primary sequence gaps
row_idx_keep = list(range(self.sequence_len))
for idx, aa in enumerate(self.sequences[0]):
if aa == "-":
row_idx_keep.remove(idx)
pssm = weight_matrix[row_idx_keep, :-1] # remove gaps
print("remove gaps")
pprint(pssm[0, :])
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return self.number_of_sequences, self.sequence_len
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-", "")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
# fill counter
counters = self._count_observed_amino_acids()
weight_matrix = np.zeros((self.sequence_len, self.number_of_sequences))
for idx, sequence in enumerate(self.sequences):
for position in range(self.sequence_len):
curr_counter = counters[position]
number_of_letters = len(curr_counter.keys())
if number_of_letters > 1:
weight_matrix[position, idx] = 1 / (
curr_counter[sequence[position]] * number_of_letters
)
weights = np.sum(weight_matrix, axis=0)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
counters = self._count_observed_amino_acids()
return np.average([len(counter.keys()) for counter in counters]).astype(
np.float64
)
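
if __name__ == '__main__':
    # Small smoke test -- a sketch on a hypothetical toy alignment, not the
    # graded test data.
    toy = MSA(["ACD-", "ACDE", "AGDE"])
    print(toy.get_size())                    # (3, 4)
    print(toy.get_sequence_weights())        # Henikoff-style weights
    print(toy.get_number_of_observations())  # mean distinct letters per column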
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
import ex1
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
self.filepath = filepath
    def get_counts(self):
        return len(self.read_fasta(self.filepath))
    def get_average_length(self):
        sequences = self.read_fasta(self.filepath)
        summ = 0
        for i in range(len(sequences)):
            summ += len(sequences[i][1])
        return summ / self.get_counts()
    def read_fasta(self, path):
        with open(path, "r") as f:
            h = ""
            s = ""
            sequence_started = False
            res = []
            for line in f:
                if line.startswith(">") or line.startswith(";"):
                    if not sequence_started:
                        if s != "":
                            ex1.add_sequence(s, res)
                            s = ""
                        h += line.strip()
                        sequence_started = True
                    else:
                        h += line.strip()
                        sequence_started = False
                else:
                    s += line.strip()
                    if h != "":
                        ex1.add_sequence(h, res)
                        h = ""
                        sequence_started = False
            ex1.add_sequence(s, res)
            # Pair each header with its sequence, stripping stop markers.
            ress = []
            for i in range(0, len(res) - 1, 2):
                res[i + 1] = res[i + 1].replace("*", "")
                ress.append((res[i], res[i + 1]))
            return ress
    def get_abs_frequencies(self):
        # Return absolute counts, not normalized by length; read the file once.
        records = self.read_fasta(self.filepath)
        sequences = "".join(record[1] for record in records)
        return dict(Counter(sequences).most_common())
    def get_av_frequencies(self):
        # Return frequencies normalized by total sequence length.
        records = self.read_fasta(self.filepath)
        total_length = sum(len(record[1]) for record in records)
        dicto = self.get_abs_frequencies()
        for aa in dicto:
            dicto[aa] = dicto[aa] / total_length
        return dicto
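
if __name__ == '__main__':
    # Usage sketch -- 'tests/tests.fasta' is a hypothetical path; point this
    # at a real FASTA file to run it.
    dist = AADist('tests/tests.fasta')
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_av_frequencies())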
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome):
    allowed = set('ATGC')
    if not set(genome) <= allowed:
        raise TypeError("not a genome")
    genome_reverse = complementary(genome)[::-1]
    orf_list = []
    for y in [genome, genome_reverse]:
        for x in range(0, 3):  # the three reading frames
            reverse = y == genome_reverse
            start = y[x:]
            currentIndex = x
            startIndex = -1
            while len(start) > 2:
                if start[0:3] == 'ATG':
                    startIndex = currentIndex
                    finalStopIndex = -2
                    # Scan downstream, in frame, for the furthest stop codon.
                    for stopSequence in ["TGA", "TAA", "TAG"]:
                        beginStopIndex = 3
                        currentStopIndex = currentIndex + 3
                        internalRest = start
                        while len(internalRest) > 2 and finalStopIndex < 0:
                            internalRest = start[beginStopIndex:]
                            if internalRest[0:3] == stopSequence:
                                stopIndex = currentStopIndex
                                if stopIndex > finalStopIndex:
                                    finalStopIndex = stopIndex
                            currentStopIndex += 3
                            beginStopIndex += 3
                    # Keep only ORFs longer than 33 nucleotides.
                    if finalStopIndex > -1 and (finalStopIndex - startIndex) > 33:
                        aa = codons_to_aa(y[startIndex:startIndex + finalStopIndex + 3])
                        orf_list.append((startIndex, startIndex + finalStopIndex + 2, aa, reverse))
                currentIndex += 3
                start = start[3:]
    return orf_list
lookup_table = {
    'GCT':'A',
    'GCC':'A',
'GCA':'A',
'GCG':'A',
'CGT':'R',
'CGC':'R',
'CGA':'R',
'CGG':'R',
'AGA':'R',
'AGG':'R',
'AAT':'N',
'AAC':'N',
'GAT':'D',
'GAC':'D',
'TGT':'C',
'TGC':'C',
'CAA':'Q',
'CAG':'Q',
'GAA':'E',
'GAG':'E',
'GGT':'G',
'GGC':'G',
'GGA':'G',
'GGG':'G',
'CAT':'H',
'CAC':'H',
'ATC':'I',
'ATT':'I',
'ATA':'I',
'TTA':'L',
'TTG':'L',
'CTC':'L',
'CTT':'L',
'CTA':'L',
'CTG':'L',
'AAA':'K',
'AAG':'K',
'ATG':'M',
'TTT':'F',
'TTC':'F',
'CCT':'P',
'CCC':'P',
'CCA':'P',
'CCG':'P',
'TCT':'S',
'TCC':'S',
'TCA':'S',
'TCG':'S',
'AGT':'S',
'AGC':'S',
'ACT':'T',
'ACC':'T',
'ACA':'T',
'ACG':'T',
'TGG':'W',
'TAT':'Y',
'TAC':'Y',
'GTT':'V',
'GTC':'V',
'GTA':'V',
'GTG':'V',
'TAA':'',
'TAG':'',
'TGA':''
}
def codons_to_aa(sequence):
aa=''
part=''
while len(sequence)>=3:
part=sequence[0:3]
aa=aa+lookup_table[part]
sequence=sequence[3:]
return aa
def complementary(nucleobases):
    # Map each base to its Watson-Crick complement
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement[c] for c in nucleobases)
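
# Minimal sanity check (a sketch, not part of the graded exercise); the genome
# string below is hypothetical.
if __name__ == "__main__":
    print(complementary("ATGC"))                  # -> TACG
    print(codons_to_aa("ATGGCTTGA"))              # -> MA (stop translates to '')
    print(get_orfs("ATG" + "GCT" * 12 + "TGA"))   # one forward ORF: Met + 12 alanines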
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
        if len(sequences) == 0 or len(set(map(len, sequences))) != 1 or not all([all([base in ALPHABET for base in seq]) for seq in sequences]):
            raise TypeError("Invalid MSA")  # all sequences same length and only valid characters
# store the sequences
self.sequences = sequences
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
num_seq, msa_len = self.get_size()
primary_sequence = self.get_primary_sequence()
L = len(primary_sequence)
sequence_weights = self.get_sequence_weights()
# construct absolute count table
absolute_counts = np.zeros((L, 20))
gap_counts = np.zeros((L,))
j = 0
for cur in range(msa_len):
if self.sequences[0][cur] == '-':
continue
existing_aa_in_column = set([self.sequences[k][cur] for k in range(num_seq)])
for aa in existing_aa_in_column:
aa_ind = AA_TO_INT[aa]
if aa_ind == GAP_INDEX:
gap_counts[j] = sum([
1 if not use_sequence_weights else sequence_weights[k]
for k in range(num_seq)
if self.sequences[k][cur] == '-'
])
continue # do not take this into account
count = sum([
1 if not use_sequence_weights else sequence_weights[k]
for k in range(num_seq)
if self.sequences[k][cur] == aa
])
absolute_counts[j, aa_ind] = count
j += 1
aa_vectors = np.ones((20,)) * 0.05
        if bg_matrix is not None:  # truth-testing a numpy array raises ValueError
            bg_matrix_np = np.array(bg_matrix)
            aa_vectors = bg_matrix_np.sum(axis=1)
# aa_vectors = aa_vectors / np.linalg.norm(aa_vectors) # normalize the found weights
if redistribute_gaps:
for j in range(20):
for i in range(L):
# print('gap_counts', gap_counts.shape, 'aa_vec', aa_vectors.shape,'L', L)
gap_influence = gap_counts[i] * aa_vectors[j]
absolute_counts[i,j] += gap_influence
if add_pseudocounts:
independent_obs = self.get_number_of_observations()
G = np.zeros((L, 20))
for i in range(L):
for a in range(20):
G[i,a] = sum([
absolute_counts[i, j] / aa_vectors[j] * bg_matrix[j][a]
if bg_matrix is not None else absolute_counts[i, j] / aa_vectors[j] * 0.05 / 20
for j in range(20)])
# adjust the freq.
for i in range(L):
for j in range(20):
alpha = (independent_obs - 1)
absolute_counts[i,j] = (alpha * absolute_counts[i,j] + beta * G[i,j]) / (alpha + beta)
# normalized count by dividing sum
row_sums = absolute_counts.sum(axis = 1, keepdims = True)
relative_counts = absolute_counts / row_sums
# basic_pssm = 2 * np.log2(relative_counts / 0.05)
# divide by background frequencies
for j in range(20):
for i in range(L):
relative_counts[i, j] = relative_counts[i, j] / aa_vectors[j]
basic_pssm = 2 * np.log2(relative_counts)
# infinite score will be turned to -20
basic_pssm[np.isinf(basic_pssm)] = -20
        return np.rint(basic_pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return ''.join([base for base in self.sequences[0] if base != '-'])
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.get_size())
num_seq, seq_len = self.get_size()
for j in range(seq_len):
bases_on_this_column = [seq[j] for seq in self.sequences] # (i,)
num_distinct_bases = len(set(bases_on_this_column)) # (< i)
num_common_bases = lambda test_base: len([base for base in bases_on_this_column if base == test_base])
weights[:, j] = [
1. / (num_distinct_bases * num_common_bases(base))
if num_distinct_bases != 1 else 0.
for base in bases_on_this_column
# so we can sum it all without skipping later on
]
res = np.sum(weights.astype(np.float64), axis = 1, dtype = np.float64)
return res
# return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_observations_each_row = [len(set([seq[j] for seq in self.sequences])) for j in range(self.get_size()[-1])]
return np.array(np.sum(num_observations_each_row) / self.get_size()[-1]).astype(np.float64)
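
# Hedged usage sketch: the toy alignment below is hypothetical, chosen only to
# show the call pattern; the real tests use much larger MSAs.
if __name__ == "__main__":
    toy_msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
    print(toy_msa.get_size())                 # (4, 5)
    print(toy_msa.get_primary_sequence())     # "SEAN"
    print(toy_msa.get_sequence_weights())
    print(toy_msa.get_pssm())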
<file_sep>##############
# Exercise 2.5
##############
# You can use genome.txt for your own testing. Good luck!
from collections import Counter
codon_dict = {
'TTT': 'F',
'TTC': 'F',
'TTA': 'L',
'TTG': 'L',
'CTT': 'L',
'CTC': 'L',
'CTA': 'L',
'CTG': 'L',
'ATT': 'I',
'ATC': 'I',
'ATA': 'I',
'ATG': 'M',
'GTT': 'V',
'GTC': 'V',
'GTA': 'V',
'GTG': 'V',
'TCT': 'S',
'TCC': 'S',
'TCA': 'S',
'TCG': 'S',
'CCT': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'ACT': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'GCT': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'TAT': 'Y',
'TAC': 'Y',
'CAT': 'H',
'CAC': 'H',
'CAA': 'Q',
'CAG': 'Q',
'AAT': 'N',
'AAC': 'N',
'AAA': 'K',
'AAG': 'K',
'GAT': 'D',
'GAC': 'D',
'GAA': 'E',
'GAG': 'E',
'TGT': 'C',
'TGC': 'C',
'TGG': 'W',
'CGT': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'AGT': 'S',
'AGC': 'S',
'AGA': 'R',
'AGG': 'R',
'GGT': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G'
}
def complementary(bases):
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join([complement[base] for base in bases])
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range (0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
def aa_dist(aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def reversed_string(a_string):
return a_string[::-1]
def get_orfs(genome):
genome = genome.upper()
start = 'ATG'
stop = ['TAA', 'TAG', 'TGA']
codon_length = 3
possible_orf = False
length = 0
orfs = []
# check if it is a valid dna sequence
valid = 'ACTG'
for letter in genome:
if letter not in valid:
raise TypeError("Not a valid DNA sequence")
    # Scan all three forward reading frames (identical logic, so one loop
    # replaces the three copy-pasted blocks)
    for frame in range(3):
        possible_orf = False
        length = 0
        for i in range(frame, len(genome), codon_length):
            if genome[i:i + codon_length] == start and not possible_orf:
                possible_orf = True
                start_index = i
                length += 1
            if possible_orf:
                length += 1
                if genome[i:i + codon_length] in stop:
                    stop_index = i + 2
                    if length > 33:
                        orf = genome[start_index:i]
                        orfs.append((start_index, stop_index, codons_to_aa(orf), False))
                    possible_orf = False
                    length = 0
    reverse_seq = reversed_string(complementary(genome))
    # Scan all three reading frames on the reverse complement
    for frame in range(3):
        possible_orf = False
        length = 0
        for i in range(frame, len(reverse_seq), codon_length):
            if reverse_seq[i:i + codon_length] == start and not possible_orf:
                possible_orf = True
                start_orf = i
                start_index = len(reverse_seq) - i - 1
                length += 1
            if possible_orf:
                length += 1
                if reverse_seq[i:i + codon_length] in stop:
                    stop_index = len(reverse_seq) - (i + 2) - 1
                    if length > 33:
                        orf = reverse_seq[start_orf:i]
                        orfs.append((start_index, stop_index, codons_to_aa(orf), True))
                    possible_orf = False
                    length = 0
return orfs<file_sep>import numpy as np
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
# Check the sequences for correctness
sequences_correct = True
# The MSA contains at least one sequence
if len(sequences) == 0:
sequences_correct = False
else:
seq_len = len(sequences[0])
for seq in sequences:
# All sequences have the same length (including gaps)
if seq_len != len(seq):
sequences_correct = False
for aa in seq:
# All sequences contain only valid amino acids and gap characters
if aa not in ALPHABET:
sequences_correct = False
if sequences_correct:
self.sequences = sequences
self.seqs = None
self.aas_counts_per_pos = []
self.pssm = self.get_pssm()
else:
raise TypeError("input MSA does not contain valid sequences")
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
# Convert input sequences to numpy format
self.seqs = np.array([ list(seq) for seq in self.sequences ])
#pssm = np.zeros((len(self.sequences[0]), 20))
default_background_freq = 0.05
# Count amino acids per position in MSA
if len(self.aas_counts_per_pos) == 0:
self.compute_aas_counts_per_pos()
# Compute background frequencies from bg_matrix if needed:
background_freqs = None
if bg_matrix is not None:
bg_matrix = np.array(bg_matrix)
background_freqs = np.sum(bg_matrix, axis=0)
else:
background_freqs = np.ones(20) * default_background_freq
# 1. Calculate sequence weights
seq_weights = None
if use_sequence_weights:
seq_weights = self.get_sequence_weights()
else:
seq_weights = np.ones(self.seqs.shape[0]) # Size: num_seqs x 1
# 2. Count (with weights) observed amino acids and gaps
ungapped_len = len(self.get_primary_sequence())
weight_counts = np.zeros((ungapped_len, 21)) # Size: length_primary_seq x 21
j_ungapped = 0
for j in range(self.seqs.shape[1]):
# Skip computation for gaps in primary sequence
if self.seqs[0, j] != '-':
for i in range(self.seqs.shape[0]):
weight_counts[j_ungapped, AA_TO_INT[self.seqs[i, j]]] += seq_weights[i]
j_ungapped += 1
# 3. Redistribute gaps according to background frequencies
if redistribute_gaps:
# Add gap counts to amino acid counts
gap_addition = (np.tile(background_freqs, (weight_counts.shape[0], 1)).transpose() * weight_counts[:, -1]) # num_aa x primary_seq_length
weight_counts[:, 0:AA_TO_INT['-']] = (weight_counts[:, 0:AA_TO_INT['-']].transpose() + gap_addition).transpose()
# Multiply weighted gap counts by default background frequency
# Don't update the gap column as it is removed
# 5. Remove gap column
weight_counts = weight_counts[:, 0:-1]
# 6. Add weighted pseudocounts
if add_pseudocounts:
pseudocounts = None
#print("Pseudocounts weights")
#print(pseudocounts.shape)
if bg_matrix is not None:
pseudocounts = np.zeros_like(weight_counts) # Size: length_primary_seq x 20
for i in range(pseudocounts.shape[0]):
for j in range(pseudocounts.shape[1]):
for k in ALPHABET[0:-1]:
pseudocounts[i, j] += weight_counts[i, AA_TO_INT[k]] * bg_matrix[j, AA_TO_INT[k]] / background_freqs[AA_TO_INT[k]]
else: # No blosum matrix provided -> Use only the background probabilities
pseudocounts = (np.ones_like(weight_counts).transpose() * (np.sum(weight_counts, axis=1) * default_background_freq)).transpose()
# Adjust frequencies using pseudocounts
alpha = self.get_number_of_observations() - 1
weight_counts = (alpha * weight_counts + beta * pseudocounts) / (alpha + beta)
# 7. Normalize weight_counts to relative frequencies
weight_counts = (weight_counts.transpose() / np.sum(weight_counts, axis=1)).transpose()
# 8. Divide by background frequencies (standard or from BLOSUM62)
weight_counts = weight_counts / background_freqs
# 9. Calculate Log-Score
pssm = 2.0 * np.log2(weight_counts)
# 10. Replace -inf value by -20
pssm = np.where(pssm == -np.inf, -20, pssm)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def compute_aas_counts_per_pos(self):
for j in range(self.seqs.shape[1]):
aas, pos_counts = np.unique(self.seqs[:, j], return_counts=True)
aas_counts = dict(zip(aas, pos_counts))
self.aas_counts_per_pos += [aas_counts]
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.seqs.shape[0]) # Size: num_seqs x 1
j = 0
for aas_counts in self.aas_counts_per_pos:
r_i = len(aas_counts) # Different aa in column
if r_i > 1:
for i in range(self.seqs.shape[0]):
weights[i] += 1.0 / (r_i * aas_counts[self.seqs[i, j]])
# If r_i == 1 then exclude current position
j = j + 1
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
primary_seq_len = self.seqs.shape[1]
num_obs = 0.0
for aas_counts in self.aas_counts_per_pos:
num_obs += len(aas_counts)
num_obs = num_obs / primary_seq_len
return num_obs #.astype(np.float64)
msa_sequences = [
"--------MVATGLFVGLNKGHVVTKRE----------QPPRPNNRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGAGASEKKK----",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----MTTPAVKTGLFVGLNKGHVVTRRE----------LAPRPNSRKGKTSKRTIFIRKLIREVAGMAPYEKRITELLKVGKD---KRALKVAKRKLGTHKRAKRKREEMSSVLRKMRSLGGAAAAEKKM---",
"----MTTPQVKTGLFVGLNKGHVVTRRE----------LAPRPRSRKGKTSKRTIFIRNLIKEVAGQAPYEKRITELLK------------VAKRKLGTHKRAKRKREEMSSVLRKMRSGGGGATEKKK----",
"----------MGEIAVGLNKGHQVTKKA----------GTPRPSRRKGFLSQRVKKVRAVVREVAGWAPYERRVMELLKVGKD---KRALKMCKRKLGTHMRGKKKREEMAGVLRKMQAASKGE---------",
"----MAPKQPNTGLFVGLNKGHIVTKKE----------LAPRPSDRKGKTSKRTHFVRNLIREVAGFAPYEKRITELLKVGKD---KRALKVRQEKVGHSQESKEEER--GDVQCSP--------PDEGWWWY",
"---------MAPGLVVGLNKGKVLTKRQ----------LPERPSRRKGQLSKRTSFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MGVQYKLAVGLGKGHKVTKNE----------YKPRPSRRKGALSKHTRFVRDLIREVCGFAPFERRAMELLKVSKD---KRALKFIKKRLGTHLRGKRKRDELSNVLVAQRKAAAHKEKTEHK---",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------GKPRHSRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"---------MAPGLVVGLNKGKTLTKRQ----------LPERPSRRKGHLSKRTAFVRSIVREVAGFAPYERRVMELIRNSQD---KRARKLAKKRLGTLKRAKGKIEELTSVIQSSRLAH------------",
"-------MAIRYPMAVGLNKGHKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGHRVTKNV----------TKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRIGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRGRLTKHTKFVRDMIREVCAFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MAIRYPMAVGLNKGYKVTKNV----------SKPRHCRRRGRLTKHTKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNVLAAMRKAAAKKD--------",
"-------MVVRYPMAVGLNKGHKVTKNV----------SKPKHSRRRGRLTKHAKFARDLIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNTLAAMRKAAAKKE--------",
"-------MAIRYPMAVGLKKGHPVTKNV----------TKPKHSRRGGRLTKHSKFVRDMIREVCGFAPYERRAMELLKVSKD---KRALKFIKKRVGTHIRAKRKREELSNILAAMRKAAAKKE--------",
"---MAKEAPAKTGLAVGLNKGHKTTARV----------VKPRVSRTKGHLSKRTAFVREVVKEVAGLAPYERRVIELLRNSKD---KRARKLAKKRLGTFGRAKRKVDELQRVIAESRRAH------------",
"-------MALRYPMAVGLNKGHKVTKNV----------SKPRHSRRRSRLTNHTKFVRDMIREVCGFAPYERRAMELLKVSKS---KRALKFIKKRVGTHIRAKRKREELSNVLAAMEEAAAKKD--------",
"-----MSGPGIEGLAVGLNKGHAATQLP----------VKQRQNRHKGVASKKTKIVRELVREITGFAPYERRVLEMLRISKD---KRALKFLKRRIGTHRRAKGKREELQNVIIAQRKAHK-----------",
"--------MAKSGIAAGVNKGRKTTAKE----------VAPKISYRKGASSQRTVFVRSIVKEVAGLAPYERRLIELIRNAGE---KRAKKLAKKRLGTHKRALRKVEEMTQVIAESRRH-------------",
"-------MAVRYELAIGLNKGHKTSKIRNVKYTGDKKVKGLRGSRLKNIQTRHTKFMRDLVREVVGHAPYEKRTMELLKVSKD---KRALKFLKRRLGTHIRAKRKREELSNILTQLRKAQTHAK--------",
"-------MAVKTGIAIGLNKGKKVTQMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"-------MTVKTGIAIGLNKGKKVTSMT----------PAPKISYKKGAASNRTKFVRSLVREIAGLSPYERRLIDLIRNSGE---KRARKVAKKRLGSFTRAKAKVEEMNNIIAASRRH-------------",
"---------MAKGQAVGINKGFITTQLE-------KKLQKHSAVQRKGKLGKRVALVRQVIREVTGFAPYEKRIIELIKAGSAKDSKKATKIARKRLGTHRRAKVKKALLEEAVRAQRKK-------------",
"MSSAATKPVKRSGIIKGFNKGHAVAKRT------------VTSTFKKQVVTKRVAAIRDVIREISGFSPYERRVSELLKSGLD---KRALKVAKKRLGSIQAGKKKRDDIANINRKASAK-------------",
"MKNA--------------------YKKVRVRYPVKRPDVKRKQRGPRAETQESRFLAAAVADEISGLSPLEKKAISLLEAKNN---NKAQKLLRKRLGSHKRAVAKVEKLARMLLEK----------------"
]
bg_matrix = np.array([
    [0.0215, 0.0016, 0.0022, 0.0030, 0.0016, 0.0058, 0.0011, 0.0032, 0.0033, 0.0044, 0.0013, 0.0019, 0.0022, 0.0019, 0.0023, 0.0063, 0.0037, 0.0051, 0.0004, 0.0013],
    [0.0016, 0.0119, 0.0004, 0.0004, 0.0005, 0.0008, 0.0002, 0.0011, 0.0005, 0.0016, 0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0010, 0.0009, 0.0014, 0.0001, 0.0003],
    [0.0022, 0.0004, 0.0213, 0.0049, 0.0008, 0.0025, 0.0010, 0.0012, 0.0024, 0.0015, 0.0005, 0.0037, 0.0012, 0.0016, 0.0016, 0.0028, 0.0019, 0.0013, 0.0002, 0.0006],
    [0.0030, 0.0004, 0.0049, 0.0161, 0.0009, 0.0019, 0.0014, 0.0012, 0.0041, 0.0020, 0.0007, 0.0022, 0.0014, 0.0035, 0.0027, 0.0030, 0.0020, 0.0017, 0.0003, 0.0009],
    [0.0016, 0.0005, 0.0008, 0.0009, 0.0183, 0.0012, 0.0008, 0.0030, 0.0009, 0.0054, 0.0012, 0.0008, 0.0005, 0.0005, 0.0009, 0.0012, 0.0012, 0.0026, 0.0008, 0.0042],
    [0.0058, 0.0008, 0.0025, 0.0019, 0.0012, 0.0378, 0.0010, 0.0014, 0.0025, 0.0021, 0.0007, 0.0029, 0.0014, 0.0014, 0.0017, 0.0038, 0.0022, 0.0018, 0.0004, 0.0008],
    [0.0011, 0.0002, 0.0010, 0.0014, 0.0008, 0.0010, 0.0093, 0.0006, 0.0012, 0.0010, 0.0004, 0.0014, 0.0005, 0.0010, 0.0012, 0.0011, 0.0007, 0.0006, 0.0002, 0.0015],
    [0.0032, 0.0011, 0.0012, 0.0012, 0.0030, 0.0014, 0.0006, 0.0184, 0.0016, 0.0114, 0.0025, 0.0010, 0.0010, 0.0009, 0.0012, 0.0017, 0.0027, 0.0120, 0.0004, 0.0014],
    [0.0033, 0.0005, 0.0024, 0.0041, 0.0009, 0.0025, 0.0012, 0.0016, 0.0161, 0.0025, 0.0009, 0.0024, 0.0016, 0.0031, 0.0062, 0.0031, 0.0023, 0.0019, 0.0003, 0.0010],
    [0.0044, 0.0016, 0.0015, 0.0020, 0.0054, 0.0021, 0.0010, 0.0114, 0.0025, 0.0371, 0.0049, 0.0014, 0.0014, 0.0016, 0.0024, 0.0024, 0.0033, 0.0095, 0.0007, 0.0022],
    [0.0013, 0.0004, 0.0005, 0.0007, 0.0012, 0.0007, 0.0004, 0.0025, 0.0009, 0.0049, 0.0040, 0.0005, 0.0004, 0.0007, 0.0008, 0.0009, 0.0010, 0.0023, 0.0002, 0.0006],
    [0.0019, 0.0004, 0.0037, 0.0022, 0.0008, 0.0029, 0.0014, 0.0010, 0.0024, 0.0014, 0.0005, 0.0141, 0.0009, 0.0015, 0.0020, 0.0031, 0.0022, 0.0012, 0.0002, 0.0007],
    [0.0022, 0.0004, 0.0012, 0.0014, 0.0005, 0.0014, 0.0005, 0.0010, 0.0016, 0.0014, 0.0004, 0.0009, 0.0191, 0.0008, 0.0010, 0.0017, 0.0014, 0.0012, 0.0001, 0.0005],
    [0.0019, 0.0003, 0.0016, 0.0035, 0.0005, 0.0014, 0.0010, 0.0009, 0.0031, 0.0016, 0.0007, 0.0015, 0.0008, 0.0073, 0.0025, 0.0019, 0.0014, 0.0012, 0.0002, 0.0007],
    [0.0023, 0.0004, 0.0016, 0.0027, 0.0009, 0.0017, 0.0012, 0.0012, 0.0062, 0.0024, 0.0008, 0.0020, 0.0010, 0.0025, 0.0178, 0.0023, 0.0018, 0.0016, 0.0003, 0.0009],
    [0.0063, 0.0010, 0.0028, 0.0030, 0.0012, 0.0038, 0.0011, 0.0017, 0.0031, 0.0024, 0.0009, 0.0031, 0.0017, 0.0019, 0.0023, 0.0126, 0.0047, 0.0024, 0.0003, 0.0010],
    [0.0037, 0.0009, 0.0019, 0.0020, 0.0012, 0.0022, 0.0007, 0.0027, 0.0023, 0.0033, 0.0010, 0.0022, 0.0014, 0.0014, 0.0018, 0.0047, 0.0125, 0.0036, 0.0003, 0.0009],
    [0.0051, 0.0014, 0.0013, 0.0017, 0.0026, 0.0018, 0.0006, 0.0120, 0.0019, 0.0095, 0.0023, 0.0012, 0.0012, 0.0012, 0.0016, 0.0024, 0.0036, 0.0196, 0.0004, 0.0015],
    [0.0004, 0.0001, 0.0002, 0.0003, 0.0008, 0.0004, 0.0002, 0.0004, 0.0003, 0.0007, 0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0065, 0.0009],
    [0.0013, 0.0003, 0.0006, 0.0009, 0.0042, 0.0008, 0.0015, 0.0014, 0.0010, 0.0022, 0.0006, 0.0007, 0.0005, 0.0007, 0.0009, 0.0010, 0.0009, 0.0015, 0.0009, 0.0102]
])
a = MSA(msa_sequences)
print(a.get_pssm(bg_matrix=bg_matrix))
print(a.get_pssm(bg_matrix=bg_matrix, redistribute_gaps=True, add_pseudocounts=True))
#print(a.get_pssm(redistribute_gaps=True, use_sequence_weights=True))
print(a.get_number_of_observations())<file_sep>##############
# Exercise 2.7
##############
polar = set('NQSTY')
sulfur = set('CM')
aromatic = set('FWYH')
negative = set('DE')
positive = set('RHK')
hydrophobic = set('AVILMFYW')
amino_acids = set('ACDEFGHIKLMNPQRSTVWY')
def isAA(aa):
return (isinstance(aa, str) and aa.upper() in amino_acids)
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return (isAA(aa) and aa.upper() in positive)
def isNegativelyCharged(aa):
return (isAA(aa) and aa.upper() in negative)
def isHydrophobic(aa):
return (isAA(aa) and aa.upper() in hydrophobic)
def isAromatic(aa):
return (isAA(aa) and aa.upper() in aromatic)
def isPolar(aa):
return (isAA(aa) and aa.upper() in (polar | positive | negative))
def isProline(aa):
return (isAA(aa) and aa.upper() == 'P')
def containsSulfur(aa):
return (isAA(aa) and aa.upper() in sulfur)
def isAcid(aa):
return isNegativelyCharged(aa)
def isBasic(aa):
return isPositivelyCharged(aa)
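
# Quick self-check (a sketch; single-letter amino acid codes only):
if __name__ == "__main__":
    assert isPositivelyCharged('R') and isNegativelyCharged('E')
    assert isAromatic('W') and not isAromatic('L')
    assert isPolar('S') and isHydrophobic('V') and containsSulfur('C')
    print("all amino acid property checks passed")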
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed in NumPy 1.24
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix = self.get_score_matrix()
self.alignment = self.get_alignment()
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
align1 = self.alignment[0]
align2 = self.alignment[1]
if (len(align1)>0 and len(align2)>0):
return True
else:
return False
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
align1=""
align2=""
seq1 = self.string2
seq2 = self.string1
mat = self.score_matrix
ii=[]
jj=[]
i=np.unravel_index(mat.argmax(),mat.shape)[0]
j=np.unravel_index(mat.argmax(),mat.shape)[1]
        while mat[i][j] != 0:
            ii.append(i - 1)
            jj.append(j - 1)
            score_up = mat[i][j - 1]
            score_diagonal = mat[i - 1][j - 1]
            score_left = mat[i - 1][j]
            score = mat[i][j]
            pen = self.gap_penalty
            if seq1[i - 1] == '-' or seq2[j - 1] == '-':
                tmp = pen
            else:
                tmp = self.substitution_matrix[seq1[i - 1]][seq2[j - 1]]
            if score == score_left + pen:
                align1 = align1 + seq1[i - 1]
                align2 = align2 + '-'
                i -= 1
            elif score == score_diagonal + tmp:
                align1 = align1 + seq1[i - 1]
                align2 += seq2[j - 1]
                i -= 1
                j -= 1
            elif score == score_up + pen:
                align1 += '-'
                align2 += seq2[j - 1]
                j -= 1
rev1 = align1[::-1]
rev2 = align2[::-1]
self.index1 = ii
self.index2 = jj
return (rev2,rev1)
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
index = self.index2
else:
index = self.index1
if residue_index in index:
return True
else:
return False
def get_score_matrix(self):
mat = self.score_matrix
pen = self.gap_penalty
seq1 = self.string2
seq2 = self.string1
for i in range(mat.shape[0]):
mat[i][0] = 0
for j in range(mat.shape[1]):
mat[0][j] = 0
for i in range(1,mat.shape[0]):
for j in range(1,mat.shape[1]):
tmp=None
if (seq1[i-1]=='-' or seq2[j-1]=='-'):
tmp=pen
else:
tmp=self.substitution_matrix[seq1[i - 1]][seq2[j - 1]]
m = mat[i - 1][j - 1] + tmp
d = mat[i - 1][j] + pen
ii = mat[i][j - 1] + pen
mat[i][j] = max(m,d,ii,0)
return np.array(mat)
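
# Hedged usage sketch: the substitution matrix below is a hypothetical stand-in
# (identity-style scoring); the graded tests pass real BLOSUM-style dicts instead.
if __name__ == "__main__":
    AAS = "ARNDCEQGHIKLMFPSTWYV"
    identity = {a: {b: (3 if a == b else -1) for b in AAS} for a in AAS}
    la = LocalAlignment("AVNCCEGQHI", "ARNCKHI", -4, identity)
    if la.has_alignment():
        print(la.get_alignment())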
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
        if len(sequences) == 0:
            raise TypeError('Empty list of sequences')
        self.seq_len = len(sequences[0])
        for seq in sequences:
            if len(seq) != self.seq_len:
                raise TypeError('All sequences must have the same length')
            for char in seq:
                if char not in ALPHABET:
                    raise TypeError('Invalid character ' + char + ' in sequence')
self.aacid_col_mat = [dict() for i in range(self.seq_len)]
self.i_col_r = None
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return len(self.sequences), len(self.sequences[0])
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
def get_sequence_weights_matrix(wts, wts_mat):
"""
:param wts:
:param wts_mat:
:return:
"""
for r, seq in enumerate(self.sequences):
for c, alphabet in enumerate(seq):
wts_mat[r, c] = 1 / (self.i_col_r[c] * self.aacid_col_mat[c][alphabet])
if self.i_col_r[c] > 1:
wts[r] += wts_mat[r, c]
def calculate_amino_acids_cols():
"""
:return:
"""
for i in range(self.seq_len):
aacids_i = dict()
for seq in self.sequences:
aacids_i[seq[i]] = aacids_i.get(seq[i], 0) + 1
self.aacid_col_mat[i] = aacids_i
calculate_amino_acids_cols()
self.i_col_r = [len(self.aacid_col_mat[i].keys()) for i in range(self.seq_len)]
seq_wts = np.zeros(len(self.sequences))
seq_wts_mat = np.zeros((len(self.sequences), self.seq_len))
get_sequence_weights_matrix(seq_wts, seq_wts_mat)
return seq_wts.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
self.get_sequence_weights()
num_observations = np.float64(sum(i for i in self.i_col_r)) / self.seq_len
return num_observations
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
pssm - position-specific scoring matrix
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param use_sequence_weights: Calculate and apply sequence weights.
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
def pseudocounts(bg_frequency, beta, bg_matrix, counts_mat):
"""
:param bg_frequency:
:param beta:
:param bg_matrix:
:param counts_mat:
:return:
"""
pseudocounts = np.zeros(counts_mat.shape)
for r in range(counts_mat.shape[0]):
for c in range(counts_mat.shape[1]):
if counts_mat[r, c] == 0:
continue
pseudocounts[:, c] += (counts_mat[r][c] * bg_matrix[r, :]) / bg_frequency[
r, 0]
N = self.get_number_of_observations()
counts_mat = ((N - 1) * counts_mat + pseudocounts * beta) / (N - 1 + beta)
return counts_mat
if use_sequence_weights:
seq_wts = self.get_sequence_weights()
        if bg_matrix is not None:  # avoid ambiguous truth-testing of an array
            bg_matrix = np.array([np.array(x) for x in bg_matrix])
        else:
            bg_matrix = np.zeros((20, 20)) + (1.0 / (20 * 20))
bg_frequency_mat = bg_matrix.sum(axis=1)
bg_frequency_mat = bg_frequency_mat.reshape(20, 1)
counts_mat = np.zeros((len(ALPHABET), self.seq_len))
for s, seq in enumerate(self.sequences):
one_amino_acid_wt = 1
if use_sequence_weights:
one_amino_acid_wt = seq_wts[s]
for i, j in enumerate(seq):
counts_mat[AA_TO_INT[j], i] += one_amino_acid_wt
if redistribute_gaps:
counts_mat[0:len(ALPHABET) - 1, :] += counts_mat[len(ALPHABET) - 1, :] * bg_frequency_mat
counts_mat = np.delete(counts_mat, (len(ALPHABET) - 1), axis=0)
if add_pseudocounts:
counts_mat = pseudocounts(bg_frequency_mat, beta, bg_matrix, counts_mat)
counts_mat = counts_mat / np.sum(counts_mat, axis=0)
pssm = 2 * np.log2(counts_mat / bg_frequency_mat)
pssm[np.isinf(pssm)] = -20
        pssm = pssm[:, [i for i, j in enumerate(self.sequences[0]) if j != '-']]
pssm = np.rint(pssm.T).astype(np.int64)
return pssm
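
# Hedged usage sketch with a tiny hypothetical MSA; the real exercise feeds in
# large alignments and an optional 20x20 background-frequency matrix.
if __name__ == "__main__":
    msa = MSA(["RRP-K", "RRG-K", "RR--K"])
    print(msa.get_size())                    # (3, 5)
    print(msa.get_number_of_observations())
    print(msa.get_pssm(redistribute_gaps=True))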
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa == "R" or aa == "H" or aa == "K"
def isNegativelyCharged(aa):
return aa == "D" or aa == "E"
def isHydrophobic(aa):
hydrophobic = "AVILMFYW"
return len(aa) == 1 and hydrophobic.find(aa) != -1
def isProline(aa):
return aa == "P"
def containsSulfur(aa):
return aa == "M" or aa == "C"
def isAcid(aa):
return aa == "D" or aa == "E"
aa_dict = {
'A': {"aromatic": False, "basic": False, "polar": False},
'R': {"aromatic": False, "basic": True, "polar": True},
'N': {"aromatic": False, "basic": False, "polar": True},
'D': {"aromatic": False, "basic": False, "polar": True},
'C': {"aromatic": False, "basic": False, "polar": False},
'E': {"aromatic": False, "basic": False, "polar": True},
'Q': {"aromatic": False, "basic": False, "polar": True},
'G': {"aromatic": False, "basic": False, "polar": False},
'H': {"aromatic": True, "basic": True, "polar": True},
'I': {"aromatic": False, "basic": False, "polar": False},
'L': {"aromatic": False, "basic": False, "polar": False},
'K': {"aromatic": False, "basic": True, "polar": True},
'M': {"aromatic": False, "basic": False, "polar": False},
'F': {"aromatic": True, "basic": False, "polar": False},
'P': {"aromatic": False, "basic": False, "polar": False},
'S': {"aromatic": False, "basic": False, "polar": True},
'T': {"aromatic": False, "basic": False, "polar": True},
'W': {"aromatic": True, "basic": False, "polar": False},
'Y': {"aromatic": True, "basic": False, "polar": True},
'V': {"aromatic": False, "basic": False, "polar": False},
}
def isAromatic(aa):
return aa_dict[aa]["aromatic"]
def isBasic(aa):
return aa_dict[aa]["basic"]
def isPolar(aa):
return aa_dict[aa]["polar"]
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
aminos = ['M', 'W', 'U', 'O', 'Y', 'F', 'C', 'N', 'D', 'Q', 'E', 'H', 'K', 'I', 'G', 'A', 'V', 'T', 'P', 'L', 'S', 'R']
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
    @staticmethod
    def codons_to_aa(orf):
        # NOTE: this helper assumes a module-level codon table named codon_dict
        # (as in the Exercise 2.5 solution above); it is unused by the
        # statistics methods below.
        if len(orf) % 3 != 0:
            return None
        codons = [orf[i:i + 3] for i in range(0, len(orf), 3)]
        return ''.join(codon_dict[c] for c in codons)
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        return sum([len(s.replace("*", "")) for s in self.__sequences]) / self.get_counts()
def read_fasta(self, path):
with open(path, "r") as f:
tmpSeq = ""
for line in f:
if line.startswith(";"):
continue
if line.startswith(">"):
if tmpSeq != "":
self.__sequences.append(tmpSeq)
tmpSeq = ""
continue
tmpSeq += line.strip()
if tmpSeq != "":
self.__sequences.append(tmpSeq)
def get_abs_frequencies(self):
res = {}
for a in self.aminos:
count = 0
for s in self.__sequences:
count = count + s.count(a)
res[a] = count
return res
def get_av_frequencies(self):
absF = self.get_abs_frequencies()
total = sum([len(s.replace("*", "")) for s in self.__sequences])
res = {}
for a in self.aminos:
res[a]=absF[a]/total
return res
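
# Hedged usage sketch ("tests/sample.fasta" is a hypothetical path):
if __name__ == "__main__":
    dist = AADist("tests/sample.fasta")
    print(dist.get_counts(), dist.get_average_length())
    print(dist.get_av_frequencies())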
<file_sep>##############
# Exercise 2.7
##############
charged = ['R', 'H', 'K', 'D', 'E']
positivelyCharged = ['R', 'H', 'K']
negativelyCharged = ['D', 'E']
hydrophobic = ['A', 'V', 'I', 'L', 'M', 'F', 'P', 'G', 'W']
aromatic = ['F', 'W', 'Y', 'H']
polar = ['R', 'N', 'D', 'E', 'Q', 'H', 'K', 'S', 'T', 'Y']
proline = ['P']
sulfur = ['C', 'M']
acidic = ['D', 'E']
basic = ['R', 'H', 'K']
def isCharged(symbol):
    return symbol.upper() in charged

def isPositivelyCharged(symbol):
    return symbol.upper() in positivelyCharged

def isNegativelyCharged(symbol):
    return symbol.upper() in negativelyCharged

def isHydrophobic(symbol):
    return symbol.upper() in hydrophobic

def isAromatic(symbol):
    return symbol.upper() in aromatic

def isPolar(symbol):
    return symbol.upper() in polar

def isProline(symbol):
    return symbol.upper() in proline

def containsSulfur(symbol):
    return symbol.upper() in sulfur

def isAcid(symbol):
    return symbol.upper() in acidic

def isBasic(symbol):
    return symbol.upper() in basic
<file_sep>import numpy as np
#from tests.matrices import MATRICES
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
# backtracking
self.backtrack = [[[] for j in range(self.score_matrix.shape[1])] for i in range(self.score_matrix.shape[0])]
for i in range(1, self.score_matrix.shape[0], 1):
for j in range(1, self.score_matrix.shape[1], 1):
d = self.score_matrix[i - 1, j - 1] + self.substitution_matrix[self.string2[i - 1]][self.string1[j - 1]]
v = self.score_matrix[i - 1, j] + self.gap_penalty
h = self.score_matrix[i, j - 1] + self.gap_penalty
self.score_matrix[i, j] = max([d, v, h, 0])
if self.score_matrix[i, j] == d:
self.backtrack[i][j].append('d')
if self.score_matrix[i, j] == v:
self.backtrack[i][j].append('v')
if self.score_matrix[i, j] == h:
self.backtrack[i][j].append('h')
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.get_alignment() != ('', '')
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
def rec_get_alignments(i, j):
if self.score_matrix[i, j] == 0:
return [('', '')]
res = []
for dir in self.backtrack[i][j]:
if dir == 'd':
l = rec_get_alignments(i - 1, j - 1)
#print(l)
res += [(x + self.string1[j - 1], y + self.string2[i - 1]) for (x, y) in l]
elif dir == 'v':
l = rec_get_alignments(i - 1, j)
#print(l)
res += [(x + '-', y + self.string2[i - 1]) for (x, y) in l]
else:
l = rec_get_alignments(i, j - 1)
#print(l)
res += [(x + self.string1[j - 1], y + '-') for (x, y) in l]
return res
argmax = np.unravel_index(np.argmax(self.score_matrix), self.score_matrix.shape)
return rec_get_alignments(argmax[0], argmax[1])[0]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
alignment = self.get_alignment()
if string_number == 1:
count = 0
while alignment[0][0:2] != self.string1[count:count + 2]:
count += 1
s = ('-' * count) + alignment[0] + ('-' * (len(self.string1) - count - len(alignment[0])))
#print(s)
return self.string1[residue_index] == s[residue_index]
else:
count = 0
while alignment[1][0:2] != self.string2[count:count + 2]:
count += 1
s = ('-' * count) + alignment[1] + ('-' * (len(self.string2) - count - len(alignment[1])))
#print(s)
return self.string2[residue_index] == s[residue_index]
"""
if __name__ == '__main__':
a = LocalAlignment("ARNDCEQGHI", "DDCEQHG", -6, MATRICES["blosum"])
print(a.score_matrix)
print(a.get_alignment())
print(a.is_residue_aligned(2, 0))
"""
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.traceback_matrix = []
for i in range(0, len(string2) + 2):
row = []
for j in range(0, len(string1) + 2):
row.append([])
self.traceback_matrix.append(row)
self.str1_indices = []
self.str2_indices = []
self.calculate_scores()
self.alignment = self.align()
def calculate_scores(self):
rows = len(self.string2) + 1
columns = len(self.string1) + 1
        # Smith-Waterman (local alignment): the first row and column stay zero,
        # so no gap-penalty border initialization is needed -- the cumulative
        # penalties used previously belong to global (Needleman-Wunsch) alignment.
do_row = True
do_column = True
width = 0
height = 0
while True:
if width < columns - 1:
do_column = True
width += 1
if height < rows - 1:
do_row = True
height += 1
# Row
if do_row:
do_row = False
for i in range(1, width):
self.calc_cell_score(height, i)
# Column
if do_column:
do_column = False
for i in range(1, height):
self.calc_cell_score(i, width)
# Corner
self.calc_cell_score(height, width)
if width == columns - 1 and height == rows - 1:
break
def calc_cell_score(self, y, x):
left_score = self.score_matrix[y][x-1] + self.gap_penalty
top_score = self.score_matrix[y-1][x] + self.gap_penalty
match_score = self.substitution_matrix[self.string1[x-1]][self.string2[y-1]]
diag_score = self.score_matrix[y-1][x-1] + match_score
max_score = max(left_score, top_score, diag_score, 0)
if left_score == max_score:
self.traceback_matrix[y][x].append('L')
if top_score == max_score:
self.traceback_matrix[y][x].append('T')
if diag_score == max_score:
self.traceback_matrix[y][x].append('D')
self.score_matrix[y][x] = max_score
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
y, x = np.unravel_index(np.argmax(self.score_matrix), self.score_matrix.shape)
return self.get_alignments(y, x)
def get_alignments(self, y, x):
if self.score_matrix[y][x] == 0:
return '', ''
if x == 0 and y == 0:
return None
for d in self.traceback_matrix[y][x]:
if d == 'D':
prefix = self.get_alignments(y-1, x-1)
if prefix is not None:
str1, str2 = prefix
self.str1_indices.append(x-1)
self.str2_indices.append(y-1)
return str1+self.string1[x-1], str2+self.string2[y-1]
if d == 'L':
prefix = self.get_alignments(y, x-1)
if prefix is not None:
str1, str2 = prefix
self.str1_indices.append(x-1)
return str1+self.string1[x-1], str2+'-'
if d == 'T':
prefix = self.get_alignments(y-1, x)
if prefix is not None:
str1, str2 = prefix
self.str2_indices.append(y-1)
return str1+'-', str2+self.string2[y-1]
return None
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.alignment != ('', '')
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignment if self.alignment is not None else ('', '')
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
return residue_index in self.str1_indices
if string_number == 2:
return residue_index in self.str2_indices
return False
<file_sep>import numpy as np
from tests import matrices
import random
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.stringM = string1
self.stringN = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(self.stringN) + 1, len(self.stringM) + 1), dtype=int)
self.traceback = {}
self.paths = None
self.score = None
self.align_start = None
self.align_end = None
self.align()
def get_paths(self, n, m):
if self.traceback.get((n, m), []) == []:
# end of path
self.align_start = (n,m)
return [(self.stringM[m-1], self.stringN[n-1])]
print("{} with val {} - Tuple n {}, m {}".format((n,m), self.score_matrix[n,m], self.stringN[n-1], self.stringM[m-1]))
paths = []
#score = self.score_matrix[n,m] # prepended with 0
for n_pred,m_pred in self.traceback[(n, m)]:
for seq_trail_m, seq_trail_n in self.get_paths(n_pred, m_pred):
if n_pred == n-1 and m_pred == m-1:
# n index for the string with one additional element in front
paths.append((seq_trail_m + self.stringM[m-1], seq_trail_n + self.stringN[n-1]))
print("go diag from {} with val {} - Tuple {},{}".format((n,m), self.score_matrix[n,m], self.stringN[n-1], self.stringM[m-1]))
elif n_pred == n and m_pred == m-1:
# n index for the string with one additional element in front
paths.append((seq_trail_m + self.stringM[m-1], seq_trail_n + "-"))
print("go m-1 from {} with val {} - Tuple {},{}".format((n,m), self.score_matrix[n,m], "-", self.stringM[m-1]))
elif n_pred == n-1 and m_pred == m:
# n index for the string with one additional element in front
paths.append((seq_trail_m + "-", seq_trail_n + self.stringN[n-1]))
print("go n-1 from {} with val {} - Tuple {},{}".format((n,m), self.score_matrix[n,m], self.stringN[n-1], "-"))
else:
print("ERROR! Did not find correct neighbor for n_pred {} n {} m_pred {} m {}".format(n_pred, n, m_pred, m))
return paths
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
#Sm,n = max {
# Sm-1,n-1 + d(m, n)
# Sm-1,n + Gap
# Sm,n-1 + Gap
# 0
# }
M = len(self.stringM)
N = len(self.stringN)
for n in range(0,N+1):
self.score_matrix[n,0] = 0
for m in range(1,M+1):
self.score_matrix[0,m] = 0
# Fill
for n in range(1,N+1):
for m in range(1,M+1):
                # An arrow marker is set on every neighbor cell that produced the
                # maximum below. http://webclu.bio.wzw.tum.de/binfo/edu/tutorials/pairalign/glob_ali.html
                predecessor_opts = [
                    (n-1, m-1, self.score_matrix[n-1, m-1] + self.substitution_matrix[self.stringN[n-1]][self.stringM[m-1]]),
                    (n, m-1, self.score_matrix[n, m-1] + self.gap_penalty),
                    (n-1, m, self.score_matrix[n-1, m] + self.gap_penalty),
                    (None, None, 0)
                ]
max_val = max([val for _,_,val in predecessor_opts])
self.score_matrix[n,m] = max_val
if max_val > 0:
pred_list = []
self.traceback[(n,m)] = pred_list
for n_pred,m_pred,val in [(n_pred,m_pred,val) for n_pred,m_pred,val in predecessor_opts if val == max_val]:
if self.score_matrix[n_pred,m_pred] > 0:
# add to traceback only if predecessor has value greater zero
pred_list.append((n_pred,m_pred))
print("self.score_matrix \n{}".format(self.score_matrix))
#print("self.traceback \n{}".format(self.traceback))
        # Traceback
        # To enumerate all possible alignments, the so-called "traceback" is performed:
        # every path reachable via the arrow markers is followed and marked from the
        # bottom-right cell towards the top-left. The alignment is then read off by
        # taking (top-left to bottom-right) the corresponding characters of both
        # sequences from every cell touched by a traceback path; if a position of one
        # sequence is touched more than once, a gap (-) is written instead.
        # only first occurrence of max value returned
n_max, m_max = np.unravel_index(np.argmax(self.score_matrix, axis=None), self.score_matrix.shape)
self.score = self.score_matrix[n_max, m_max]
if self.score > 0:
self.align_end = (n_max, m_max)
self.paths = self.get_paths(n_max, m_max)
print("Score: {}".format(self.score))
print("For input: {}".format((self.stringM,self.stringN)))
print("Paths: {}".format(self.paths))
print("Paths from {} to {}".format(self.align_start, self.align_end))
else:
self.paths = [('', '')]
print("No Paths found!")
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.paths[0] != ('', '')
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if len(self.paths) < 1:
return None
if len(self.paths) > 1:
print("More than one path found!")
return self.paths[0]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
# check if is included in string
# (m,n), align_start/end is (n,m) with additional first element
if self.has_alignment():
if string_number == 1:
return self.within_range(residue_index, self.align_start[1]-1, self.align_end[1]-1)
elif string_number == 2:
return self.within_range(residue_index, self.align_start[0]-1, self.align_end[0]-1)
else:
print("No string with number " + string_number)
return False
else:
return False
def within_range(self, val, start, inclusive_end):
if val >= start and val <= inclusive_end:
return True
else:
return False
def main():
print('PDB parser class.')
aa_list = ['A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']*5
string1 = ''.join(random.sample(aa_list,random.randint(0, 24)))
string2 = ''.join(random.sample(aa_list,random.randint(0, 24)))
# alignment = LocalAlignment(string1, string2, -6, matrices.MATRICES['blosum'])
alignment = LocalAlignment('SHLPYIFQSKEWYTRKRMWFCNDCMG', 'VQLLYPWAGKMPHQGCDETASMGNHKEQ', -6, matrices.MATRICES['blosum'])
print(alignment.get_alignment())
print(alignment.is_residue_aligned(1,0))
#print(alignment.is_residue_aligned(1,3))
#print(alignment.is_residue_aligned(2,0))
print(alignment.is_residue_aligned(2,6))
return None
if __name__ == '__main__':
main()
'''
"strings": ["ARNDCEQGHI", "DDCEQHG"],
"gap_penalty": -6,
"matrix": "blosum",
"alignment": ["NDCEQGH", "DDCEQ-H"],
1 0 False; 1 3 True
2 0 True; 2 6 False
Score: 28
self.score_matrix
A R N D C E Q G H I
0 1 2 3 4 5 6 7 8 9 10
0[[ 0 0 0 0 0 0 0 0 0 0 0]
D 1 [ 0 0 0 1 6 0 2 0 0 0 0]
D 2 [ 0 0 0 1 7 3 2 2 0 0 0]
C 3 [ 0 0 0 0 1 16 10 4 0 0 0]
E 4 [ 0 0 0 0 2 10 21 15 9 3 0]
Q 5 [ 0 0 1 0 0 4 15 26 20 14 8]
H 6 [ 0 0 0 2 0 0 9 20 24 28 22]
G 7 [ 0 0 0 0 1 0 3 14 26 22 24]]
For input: ('ARNDCEQGHI', 'DDCEQHG')
Paths: [('NDCEQGH', 'DDCEQ-H')]
Paths from (0, 2) to (6, 9)
('NDCEQGH', 'DDCEQ-H')
ARNDCEQGHI
DDCEQ-HG
'''
'''
Regression note (condensed from a pasted debug run): for the input pair
('SHLPYIFQSKEWYTRKRMWFCNDCMG', 'VQLLYPWAGKMPHQGCDETASMGNHKEQ') the score
matrix peaks at 12 in cell (18, 23); the traceback visited (17, 22) and
(16, 21) but get_paths ultimately returned an empty list
("Paths from None to (18, 23)"), i.e. the traceback ended without reaching
a recorded start cell.
'''
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def S(self, i, j):
a = self.string2[i - 1]
b = self.string1[j - 1]
        return self.substitution_matrix[a][b]
def F(self, i, j):
return self.score_matrix[i][j]
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(0, len(self.string2) + 1):
self.score_matrix[i][0] = i * self.gap_penalty
for j in range(0, len(self.string1) + 1):
self.score_matrix[0][j] = j * self.gap_penalty
for i in range(1, len(self.string2) + 1):
for j in range(1, len(self.string1) + 1):
match = self.F(i - 1, j - 1) + self.S(i, j)
delete = self.F(i - 1, j) + self.gap_penalty
insert = self.F(i, j - 1) + self.gap_penalty
self.score_matrix[i][j] = max(match, delete, insert)
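        # The loops above fill the standard Needleman-Wunsch recurrence:
        #   F(i, j) = max( F(i-1, j-1) + S(i, j),      # diagonal: match/mismatch
        #                  F(i-1, j)   + gap_penalty,  # gap in string1
        #                  F(i, j-1)   + gap_penalty ) # gap in string2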
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def update(self, i, j):
return self.S(i, j), self.string2[i - 1], self.string1[j - 1]
def traverse(self, sI, sJ, sA, sB, all):
A = sA
B = sB
i = sI
j = sJ
while i > 0 and j > 0:
(s, a, b) = self.update(i, j)
if i > 0 and j > 0 and self.F(i, j) == self.F(i - 1, j - 1) + s:
A += a
B += b
i -= 1
j -= 1
self.traverse(i, j, A, B, all)
(s, a, b) = self.update(i, j)
if i > 0 and self.F(i, j) == self.F(i - 1, j) + self.gap_penalty:
A += a
B += "-"
i -= 1
self.traverse(i, j, A, B, all)
(s, a, b) = self.update(i, j)
if j > 0 and self.F(i, j) == self.F(i, j - 1) + self.gap_penalty:
A += "-"
B += b
j -= 1
self.traverse(i, j, A, B, all)
all.add((B[::-1], A[::-1]))
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
s = set([])
self.traverse(len(self.string2), len(self.string1), "", "", s)
return list(s)
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
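
# Minimal usage sketch (assumed toy identity-style substitution dict over 'ACGT';
# the exercises use real substitution matrices instead). It only illustrates the
# expected call pattern for this class.
if __name__ == '__main__':
    toy_matrix = {a: {b: (1 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    ga = GlobalAlignment('ACGT', 'AGT', -2, toy_matrix)
    print(ga.get_best_score())
    print(ga.get_alignments())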
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
# Genetic code dictionary
codon_dict = {
'TTT': 'F',
'TTC': 'F',
'TTA': 'L',
'TTG': 'L',
'TCT': 'S',
'TCC': 'S',
'TCA': 'S',
'TCG': 'S',
'TAT': 'Y',
'TAC': 'Y',
'TGT': 'C',
'TGC': 'C',
'TGG': 'W',
'CTT': 'L',
'CTC': 'L',
'CTA': 'L',
'CTG': 'L',
'CCT': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'CAT': 'H',
'CAC': 'H',
'CAA': 'Q',
'CAG': 'Q',
'CGT': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'ATT': 'I',
'ATC': 'I',
'ATA': 'I',
'ATG': 'M',
'ACT': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'AAT': 'N',
'AAC': 'N',
'AAA': 'K',
'AAG': 'K',
'AGT': 'S',
'AGC': 'S',
'AGA': 'R',
'AGG': 'R',
'GTT': 'V',
'GTC': 'V',
'GTA': 'V',
'GTG': 'V',
'GCT': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'GAT': 'D',
'GAC': 'D',
'GAA': 'E',
'GAG': 'E',
'GGT': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G',
'TAG': '',
'TGA': '',
'TAA': '',
}
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i + 3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
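# Quick sanity check (hypothetical inputs, not part of the supplied tests):
# ATG -> M and GCC -> A, stop codons map to '', and a length that is not a
# multiple of 3 yields None.
assert codons_to_aa('ATGGCC') == 'MA'
assert codons_to_aa('ATGG') is None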
def complementary(genome):
result = ''
for c in genome.upper():
if c == 'A':
result += 'T'
elif c == 'T':
result += 'A'
elif c == 'G':
result += 'C'
elif c == 'C':
result += 'G'
else:
raise TypeError("Not a DNA sequence.")
return result
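# Illustrative check (assumed input): each base is swapped with its complement,
# e.g. complementary('ATGC') == 'TACG'; non-ACGT characters raise TypeError.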
def search_orfs(genome, frame, reversed_strand):
started = False
i = frame
result = []
while i <= 2*len(genome):
if i > len(genome):
circular = True
next_codon = genome[i % len(genome)] + genome[(i + 1) % len(genome)] + genome[(i + 2) % len(genome)]
# print(next_codon + '\n')
if not started: # no start codon found yet
if next_codon == 'ATG': # start codon
# print(genome[i % len(genome)])
started = True
if reversed_strand:
i_start = len(genome) - (i % len(genome))
else:
i_start = i % len(genome)
else: # already in the sequence
if next_codon == 'TAG' or next_codon == 'TGA' or next_codon == 'TAA': # stop codon found
# print('STOP')
if reversed_strand:
i_end = len(genome) - ((i + 3) % len(genome))
else:
i_end = ((i + 3) % len(genome))
started = False
if reversed_strand:
i_start = len(genome) - i_start
i_end = len(genome) - i_end
if (len(genome[i_start:i_end]) % 3) == 0:
# print(i_start, i_end)
# print(len(genome[i_start:i_end]))
aa = codons_to_aa(genome[i_start:i_end])
if len(aa) > 33:
if reversed_strand:
i_start = len(genome) - i_start
i_end = len(genome) - i_end
# if circular:
# length = len(genome) - i_start + i_end
# else:
# length = i_end - i_start
final = (i_start, i_end, aa, reversed_strand)
duplicate = False
for orf in result:
if orf[0] == i_start:
duplicate = True
if orf[0] > orf[1]:
if not orf[-1]: # circular and reversed
orf_length = len(genome) - orf[0] + orf[1]
else: # reversed
orf_length = orf[0] - orf[1]
else: # regular
orf_length = orf[1] - orf[0]
if i_start > i_end:
if not reversed_strand:
new_orf_length = len(genome) - i_start + i_end
else:
new_orf_length = i_start - i_end
else:
new_orf_length = i_end - i_start
if orf_length < new_orf_length:
result.remove(orf)
result.append(final)
break
if not duplicate:
result.append(final)
i += 3
print(result)
return result
def get_orfs(genome):
# genome = genome.upper()
if genome.strip('ACGT') != '':
raise TypeError("Not a DNA sequence.")
orfs = []
orfs += (search_orfs(genome, 0, False))
orfs += (search_orfs(genome, 1, False))
orfs += (search_orfs(genome, 2, False))
orfs += (search_orfs(complementary(genome)[::-1], 0, True))
orfs += (search_orfs(complementary(genome)[::-1], 1, True))
orfs += (search_orfs(complementary(genome)[::-1], 2, True))
return orfs
# with open('genome.txt', 'r') as myfile:
# genome = myfile.read().replace('\n', '')
#
# print(get_orfs(genome))
<file_sep>import numpy as np
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError
self.sequences = np.array(sequences)
self.len_sequences = len(self.sequences[0])
correct_lengths = [len(item) == self.len_sequences for item in self.sequences]
if not all(correct_lengths):
raise TypeError
contained_in_alphabet = [[letter in ALPHABET for letter in item] for item in self.sequences]
all_contained = np.all(contained_in_alphabet)
if not all_contained:
raise TypeError
self.num_sequences = self.sequences.shape[0]
self.full_array = self.strings_to_rows_for_full_2d(sequences)
self.aa_counts = Counter()
for seq in self.sequences:
tmp_count = Counter(seq)
self.aa_counts += tmp_count
self.col_counts = self.count_columns()
def strings_to_rows_for_full_2d(self, seqs):
strseqs = [list(seq) for seq in seqs]
return np.array(strseqs)
def count_columns(self):
col_counts = []
for col in range(self.len_sequences):
tmp_count = Counter(self.full_array[:, col])
col_counts.append(tmp_count)
return col_counts
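    # e.g. (assumed MSA ['AC', 'A-']): count_columns() returns
    # [Counter({'A': 2}), Counter({'C': 1, '-': 1})]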
def count_columns_with_seq_weights(self, sequence_weights, primary_indices: list):
col_counts = []
for col in primary_indices:
tmp_count = {}
for row_index, letter in enumerate(self.full_array[:, col]):
if letter in tmp_count:
tmp_count[letter] += sequence_weights[row_index]
else:
tmp_count[letter] = sequence_weights[row_index]
col_counts.append(tmp_count)
return col_counts
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
primary_sequence = self.get_primary_sequence()
primary_sequence_indices = self.get_primary_sequence_indices()
pssm = np.zeros((len(primary_sequence), 20))
if bg_matrix is not None:
bg_sums = np.sum(bg_matrix, axis=0)
if use_sequence_weights:
sequence_weights = self.get_sequence_weights() # 1. calc seq weights
col_counts_with_seq_weights = self.count_columns_with_seq_weights(sequence_weights, # 2. count with weights
primary_sequence_indices)
if add_pseudocounts:
# pseudocounts = np.zeros(pssm.shape)
alpha = self.get_number_of_observations() - 1
for index in range(len(primary_sequence)):
if redistribute_gaps:
gaps = 0
overall_index = primary_sequence_indices[index]
if use_sequence_weights:
col_count = col_counts_with_seq_weights[index]
else:
col_count = self.col_counts[overall_index]
for aa in col_count:
if aa == '-':
if redistribute_gaps:
gaps += col_count[aa]
else:
pssm[index, AA_TO_INT[aa]] = col_count[aa]
if add_pseudocounts: # 4. add pseudocounts
pseudo_row = []
for a in range(20):
g_i_a = 0
for j in range(20):
if bg_matrix is None:
p_j = 0.05
q_j_a = 0.0025
else:
p_j = bg_sums[j]
q_j_a = bg_matrix[j][a]
aa = INT_TO_AA[j]
if aa in col_count:
f_i_j = col_count[INT_TO_AA[j]]
else:
f_i_j = 0
g_i_a += f_i_j / p_j * q_j_a
# pseudocounts[index, a] = g_i_a
pseudo_row.append(g_i_a)
pseudo_row = np.array(pseudo_row)
                # weighted mix of observed frequencies (alpha) and pseudocounts (beta)
                f_i = (alpha * pssm[index] + beta * pseudo_row) / (alpha + beta)
pssm[index] = f_i
if redistribute_gaps: # 3. redistribute
if bg_matrix is None:
pssm[index] += gaps * 0.05
else:
for col_index in range(len(pssm[index])):
pssm[index, col_index] += gaps * bg_sums[col_index]
pssm[index] /= np.sum(pssm[index]) # 5. normalize to rel freq
if bg_matrix is None: # 6. divide by background freq
pssm /= 0.05
else:
for col_index in range(pssm.shape[1]):
pssm[:, col_index] /= bg_sums[col_index]
pssm = 2 * np.log2(pssm) # 7. log score
        # 8. rows for gap positions in the primary sequence were never created above, so there is nothing to remove here
pssm[np.isneginf(pssm)] = -20
pssm = np.round(pssm, 0)
return np.rint(pssm).astype(np.int64)
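    # Usage sketch (hypothetical two-sequence MSA, uniform background
    # frequencies; the names below are illustrative only):
    #   msa = MSA(['SEQVENCE', 'SEQ-ENCE'])
    #   pssm = msa.get_pssm()  # shape (8, 20): one row per non-gap primary position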
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return self.num_sequences, self.len_sequences
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_primary_sequence_indices(self):
res = []
index = 0
for letter in self.sequences[0]:
if letter == '-':
index += 1
else:
res.append(index)
index += 1
return res
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.full_array.shape)
for col_index in range(weights.shape[1]):
r = len(self.col_counts[col_index]) # num of different aa in MSA[col_index]
if r > 1:
for row_index in range(weights.shape[0]):
letter = self.full_array[row_index, col_index]
s = self.col_counts[col_index][letter]
weights[row_index, col_index] = 1 / (r * s)
weights_summed = np.sum(weights, axis=1)
return weights_summed.astype(np.float64)
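    # Worked toy example (assumed, not from the exercise tests): for
    # MSA(['AA', 'AC']), column 0 is fully conserved (r == 1, contributes
    # nothing) while column 1 has r == 2 residue types with s == 1 each,
    # so both sequences receive weight 1/(2*1) = 0.5.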
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        total_residue_types = 0
        for col in self.col_counts:
            total_residue_types += len(col)
        num_obs = total_residue_types / self.len_sequences
        # the docstring asks for numpy.float64, so cast the plain Python float
        return np.float64(num_obs)
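    # Toy example (assumed): for MSA(['AA', 'AC']) the two columns contain
    # 1 and 2 distinct residues respectively, so the estimate is (1 + 2) / 2 = 1.5.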
<file_sep>##############
# Exercise 2.6
##############
class AADist:
    def __init__(self, filepath):
        # instance attributes (previously class-level mutable attributes,
        # which leaked state between AADist instances)
        self.proteins = []
        self.count = 0.0
        self.total_length = 0.0
        self.__sequences = []
        self.read_fasta(filepath)
def get_counts(self):
return self.count
    def get_average_length(self):
        # reset before summing so repeated calls do not double-count
        self.total_length = 0.0
        for pro in self.proteins:
            self.total_length += len(pro)
        return self.total_length / self.count
def read_fasta(self,path):
#global count
with open(path) as infile:
for line in infile:
if line.startswith(">"):
self.count=self.count+1
continue
x=line.strip()
if x !='':
if '*' in x:
x=x[:-1]
self.proteins.append(x)
else:
self.proteins.append(x)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
frequencies={}
for pro in self.proteins:
for aa in pro:
if aa in frequencies:
frequencies[aa]+=1
else:
frequencies[aa]=1
return frequencies
    def get_av_frequencies(self):
        # return number of occurrences normalized by total sequence length
        avg_freq = self.get_abs_frequencies()
        total_length = sum(len(pro) for pro in self.proteins)
        for i in avg_freq:
            avg_freq[i] = avg_freq[i] / total_length
        return avg_freq
<file_sep>#genome = ''
#with open('tests/genome.txt','r') as file:
# for line in file:
# genome += line [:-1]
complementNS = {'A':'T','T':'A','C':'G','G':'C'}
codontable = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',
}
def secondary(primary):
primary = primary[::-1]
# print(primary)
secondary = ''.join([complementNS[c] for c in primary])
# print(secondary)
return secondary
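# Example (assumed input): secondary('ATGC') returns 'GCAT', i.e. the strand
# is reversed first and then each base is complemented.
assert secondary('ATGC') == 'GCAT'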
def findStop(stop, codons):
indices = []
for i in range(len(codons)):
if i > 33 and stop == codons[i]:
indices.append(i)
return indices
def nextStopFrom(strand):
### Cut the strand into pieces of three on the three reading frames
pieces = cutIntoThree(strand)
### Find start Stop ind the pieces
indexList = findStop('TGA',pieces) + findStop('TAG',pieces) + findStop('TAA',pieces)
# print(indexList)
# input()
# raise Exception
try:
nextStop = min(indexList)
return nextStop * 3
except ValueError:
return -1
def nextStartFrom(strand):
try:
start_index = strand.index('ATG')
return start_index
except ValueError:
return -1
def cutIntoThree(strand):
pieces = []
while len(strand) >= 3:
pieces.append(strand[:3])
strand = strand[3:]
# print(pieces)
# print('rest of strand',strand)
return pieces
def orfToAA(orf):
pieces = cutIntoThree(orf)
# print(pieces)
# raise Exception
aa = ''.join([codontable[x]for x in pieces])
# print(len(aa), aa)
return aa
def get_orfs(genome):
overalltuples = []
for start,stop in get_orfs2(genome):
        # print('start',start,'stop',stop,'\n',genome[start:stop])  # convert the overflow when stop is smaller than start
orphane = orfToAA((genome*2)[start:stop])
overalltuples.append((start,stop%len(genome),orphane,False))
for start,stop in get_orfs2(secondary(genome)):
        # print('start',start,'stop',stop,'\n',genome[start:stop])  # convert the overflow when stop is smaller than start
orphane = orfToAA((secondary(genome)*2)[start:stop])
        start, stop = reversingStartStop(genome, start, stop)
overalltuples.append((start,stop,orphane,False))
return overalltuples
def reversingStartStop(genome,start,stop):
if stop > len(genome):
return len(genome)-start,stop%len(genome)
else:
return len(genome)-start,len(genome)-stop
def get_orfs2(genome):
if len(genome) != len([x for x in genome if x == 'A' or x == 'T' or x=='C' or x == 'G']):
raise ValueError
saveGenome = genome[:]
indexoffset = 0
startStopTuples = []
stopSet = set()
# print(len(genome))
while len(genome) > 3:
# print('################################################################')
# print(len(genome))
start = nextStartFrom(genome)
if start != -1:
# print('start is ',start )
stop = nextStopFrom(genome[start:]+ saveGenome[:start])
if stop != -1 and stop not in stopSet:
stopSet.add(indexoffset + start + stop + 3)
# print('stop is ', stop)
# print('real start/stop',(indexoffset + start,indexoffset + start + stop + 3))
# orfToAA(saveGenome[indexoffset + start:indexoffset + start + stop + 3])
startStopTuples.append((indexoffset + start,indexoffset + start + stop + 3))
genome = genome[start:]
indexoffset += start
# print(genome)
# print('start is ',start, 'stop is ', stop, 'real start/stop',(indexoffset + start,indexoffset + start + stop+3))
# input()
genome = genome[1:]
indexoffset += 1
# print('len(genome)',len(genome),'\n',genome)
return startStopTuples
#for start,stop in get_orfs(secondary(saveGenome)):
# print('start',start,'stop',stop-1,'\n',saveGenome[start:stop])  # convert start/stop because of the reverse strand
# orfToAA(saveGenome[start:stop])
#raise Exception
# print('_______________________________________________________')
#print(genome)
#secondary(genome)
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""The class provides a method to read fasta files and to calculate certain
statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
length = sum([len(seq) for seq in self.__sequences])
return length / self.get_counts()
def read_fasta(self, path):
with open(path) as f:
protein = None
while True:
line = f.readline()
if not line or line.startswith(">"):
if protein is not None or protein == "":
self.__sequences.append(protein)
protein = ""
if not line:
break
else:
assert protein is not None
# star replacement could break things! did this for avg_length calculation
protein += line.strip().replace("*", "")
def get_sequences(self):
return self.__sequences
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
freqs = {}
seqs = "".join(self.__sequences)
for aa in seqs:
if aa not in freqs:
freqs[aa] = 1
else:
freqs[aa] += 1
return freqs
def get_av_frequencies(self):
        # return number of occurrences normalized by length
abs_freqs = self.get_abs_frequencies()
length = sum([len(seq) for seq in self.__sequences])
av_freqs = {}
for aa, abs_freq in abs_freqs.items():
av_freqs[aa] = abs_freq / length
return av_freqs
<file_sep>import numpy as np
import os
#from .matrices import MATRICES
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignmentList = []
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
nbrRows = len(self.string2) + 1
nbrCols = len(self.string1) + 1
for row,_ in enumerate(self.score_matrix):
self.score_matrix[row][0] = row * self.gap_penalty
for col,_ in enumerate(self.score_matrix.T):
self.score_matrix[0][col] = col * self.gap_penalty
for row, residue2 in enumerate(self.string2):
for col, residue1 in enumerate(self.string1):
diagonal = self.score_matrix[row][col] + self.substituion_matrix[residue1][residue2]
gapHorizontal = self.score_matrix[row+1][col] + self.gap_penalty
gapVertical = self.score_matrix[row][col+1] + self.gap_penalty
maxScore = max(diagonal, gapHorizontal, gapVertical)
self.score_matrix[row+1][col+1] = maxScore
print(self.score_matrix)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
print(self.score_matrix[len(self.score_matrix) - 1][len(self.score_matrix[0]) - 1])
return self.score_matrix[len(self.score_matrix) - 1][len(self.score_matrix[0]) - 1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
row = len(self.score_matrix)
col = len(self.score_matrix[0])
self.get_optimal_alignment(row-1, col-1, ['', ''])
print(self.alignmentList)
return len(self.alignmentList)
def get_optimal_alignment(self, row, col, result):
print("______ROWCOL________")
print(row, col)
if row == 0 or col == 0:
alig1 = result[0][::-1]
alig2 = result[1][::-1]
self.alignmentList.append((alig1,alig2))
print("appened" , result)
print(self.alignmentList)
result[0] = result[0][:-1]
result[1] = result[1][:-1]
return
if row > 0 and col > 0:
print(self.score_matrix[row][col])
print(self.score_matrix[row-1][col-1])
print(self.string2)
print(self.string1)
print(self.string2[row-1])
print(self.string1[col-1])
print(self.score_matrix[row][col] - self.substituion_matrix[self.string1[col-1]][self.string2[row-1]])
current = self.score_matrix[row][col]
diagonal = self.score_matrix[row-1][col-1]
vertical = self.score_matrix[row-1][col]
horizontal = self.score_matrix[row][col-1]
charString1 = self.string1[col-1]
charString2 = self.string2[row-1]
subst = self.substituion_matrix[charString1][charString2]
            # Case 1, diagonal: the predecessor must equal the current value minus the substitution score
            if diagonal == (current - subst):
                print("Case 1 (diagonal)")
result[0] += charString1
result[1] += charString2
print(result)
self.get_optimal_alignment(row-1, col-1, result)
            # Case 2, vertical: the predecessor above must equal the current value minus the gap penalty
            if vertical == (current - self.gap_penalty):
                print("Case 2 (vertical)")
result[0] += ("-")
result[1] += charString2
print(result)
self.get_optimal_alignment(row-1, col, result)
            # Case 3, horizontal: the predecessor to the left must equal the current value minus the gap penalty
            if horizontal == (current - self.gap_penalty):
                print("Case 3 (horizontal)")
result[0] += charString1
result[1] += ("-")
print(result)
self.get_optimal_alignment(row, col-1, result)
result[0] = result[0][:-1]
result[1] = result[1][:-1]
print("Fall 4")
print(row, col)
return
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignmentList
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
switcher = {
'A': False,
'R': True,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': True,
'I': False,
'L': False,
'K': True,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isNegativelyCharged(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': True,
'C': False,
'Q': False,
'E': True,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isHydrophobic(aa):
switcher = {
'A': True,
'R': False,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': False,
'I': True,
'L': True,
'K': False,
'M': True,
'F': True,
'P': False,
'S': False,
'T': False,
'W': True,
'Y': True,
'V': True,
}
return switcher.get(aa, False)
def isAromatic(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': True,
'I': False,
'L': False,
'K': False,
'M': False,
'F': True,
'P': False,
'S': False,
'T': False,
'W': True,
'Y': True,
'V': False,
}
return switcher.get(aa, False)
def isPolar(aa):
switcher = {
'A': False,
'R': True,
'N': True,
'D': True,
'C': False,
'Q': True,
'E': True,
'G': False,
'H': True,
'I': False,
'L': False,
'K': True,
'M': False,
'F': False,
'P': False,
'S': True,
'T': True,
'W': False,
'Y': True,
'V': False,
}
return switcher.get(aa, False)
def isProline(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': False,
'F': False,
'P': True,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def containsSulfur(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': False,
'C': True,
'Q': False,
'E': False,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': True,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isAcid(aa):
switcher = {
'A': False,
'R': False,
'N': False,
'D': True,
'C': False,
'Q': False,
'E': True,
'G': False,
'H': False,
'I': False,
'L': False,
'K': False,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)
def isBasic(aa):
switcher = {
'A': False,
'R': True,
'N': False,
'D': False,
'C': False,
'Q': False,
'E': False,
'G': False,
'H': True,
'I': False,
'L': False,
'K': True,
'M': False,
'F': False,
'P': False,
'S': False,
'T': False,
'W': False,
'Y': False,
'V': False,
}
return switcher.get(aa, False)<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from ex11 import codons_to_aa
import re
COMPLEMENTS = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
def complementary(strand):
return ''.join(COMPLEMENTS[character] for character in strand.upper())
def get_orfs(genome):
DNA1 = False
DNA2 = False
stop_circular = False
orfs = []
codon_length = 3
initial_len = len(genome)
for el in genome:
if el not in ["A", "G", "C", "T"]:
raise TypeError("It is not a DNA string")
genome += genome
start_codons = [m.start() for m in re.finditer('ATG', genome)]
stop_codon = ['TAA', 'TAG', 'TGA']
for start_codon in start_codons:
orf_aux = ""
if stop_circular:
break
for i in range(start_codon, len(genome) -
((len(genome)-start_codon) % 3), codon_length):
if i in start_codons and i != start_codon:
start_codons.remove(i)
codon = codons_to_aa(genome[i:i+codon_length])
orf_aux += codon
if(codon == '_'):
if i > initial_len:
stop_circular = True
if(len(orf_aux) > 33 + 1):
orfs.append((start_codon % initial_len, (i+codon_length-1) % initial_len, orf_aux[:-1], False))
DNA1 = True
break
genome = complementary(genome)[::-1]
start_codons = [m.start() for m in re.finditer('ATG',genome)]
stop_circular = False
for start_codon in start_codons:
orf_aux = ""
if stop_circular:
break
for i in range(start_codon, len(genome) -
((len(genome)-start_codon) % 3), codon_length):
if i in start_codons and i != start_codon:
start_codons.remove(i)
codon = codons_to_aa(genome[i:i+codon_length])
orf_aux += codon
if(codon == '_'):
if i > initial_len:
stop_circular = True
if(len(orf_aux) > 33 + 1):
orfs.append(((len(genome)-1-start_codon) % initial_len,
(len(genome)-i-codon_length) % initial_len, orf_aux[:-1], True))
DNA2 = True
break
if not (DNA1 and DNA2):
raise TypeError
to_delete = []
count = 0
for idx1 in range(len(orfs)):
for idx2 in range(len(orfs)):
if orfs[idx1][2] in orfs[idx2][2] and len(orfs[idx2][2]) > len(orfs[idx1][2]):
to_delete.append(idx1)
if orfs[idx1][1] == orfs[idx2][1] and len(orfs[idx2][2]) > len(orfs[idx1][2]) and idx1 not in to_delete:
to_delete.append(idx1)
for el in to_delete:
del orfs[el-count]
count+=1
return orfs
def main():
lines = open("tests/genome.txt", "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
print(codons_to_aa(genome))
#get_orfs(genome)
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
aas = 'KRH'
return all(i in aas for i in aa)
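# Note (applies to all predicates in this file): `all(...)` lets these accept
# whole sequences, returning True only if every residue belongs to the class,
# and (vacuously) True for the empty string.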
def isNegativelyCharged(aa):
aas = 'DE'
return all(i in aas for i in aa)
def isHydrophobic(aa):
aas = 'GAVLIMPFW'
return all(i in aas for i in aa)
def isAromatic(aa):
aas = 'FY'
return all(i in aas for i in aa)
def isPolar(aa):
aas = 'STCY'
return all(i in aas for i in aa)
def isProline(aa):
aas = 'P'
return all(i in aas for i in aa)
def containsSulfur(aa):
aas = 'MC'
return all(i in aas for i in aa)
def isAcid(aa):
aas = 'DEH'
return all(i in aas for i in aa)
def isBasic(aa):
aas = 'KR'
return all(i in aas for i in aa)<file_sep>##############
# Exercise 2.5
##############
import json
from pathlib import Path
# You can use the supplied test cases for your own testing. Good luck!
'''
Ala / A GCU, GCC, GCA, GCG GCN Leu / L UUA, UUG, CUU, CUC, CUA, CUG YUR, CUN
Arg / R CGU, CGC, CGA, CGG, AGA, AGG CGN, AGR Lys / K AAA, AAG AAR
Asn / N AAU, AAC AAY Met / M AUG
Asp / D GAU, GAC GAY Phe / F UUU, UUC UUY
Cys / C UGU, UGC UGY Pro / P CCU, CCC, CCA, CCG CCN
Gln / Q CAA, CAG CAR Ser / S UCU, UCC, UCA, UCG, AGU, AGC UCN, AGY
Glu / E GAA, GAG GAR Thr / T ACU, ACC, ACA, ACG ACN
Gly / G GGU, GGC, GGA, GGG GGN Trp / W UGG
His / H CAU, CAC CAY Tyr / Y UAU, UAC UAY
Ile / I AUU, AUC, AUA AUH Val / V GUU, GUC, GUA, GUG GUN
START AUG STOP UAA, UGA, UAG URA, UAG
'''
# Inverse table for the standard genetic code
codon_dict_inverse = {
'A': ['GCU', 'GCC', 'GCA', 'GCG'],
'R': ['CGU', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'N': ['AAU', 'AAC'],
'D': ['GAU', 'GAC'],
'C': ['UGU', 'UGC'],
'Q': ['CAA', 'CAG'],
'E': ['GAA', 'GAG'],
'G': ['GGU', 'GGC', 'GGA', 'GGG'],
'H': ['CAU', 'CAC'],
'I': ['AUU', 'AUC', 'AUA'],
'L': ['UUA', 'UUG', 'CUU', 'CUC', 'CUA', 'CUG'],
'K': ['AAA', 'AAG'],
    'M': ['AUG'],  # also the START codon
'F': ['UUU', 'UUC'],
'P': ['CCU', 'CCC', 'CCA', 'CCG'],
'S': ['UCU', 'UCC', 'UCA', 'UCG', 'AGU', 'AGC'],
'T': ['ACU', 'ACC', 'ACA', 'ACG'],
'W': ['UGG'],
'Y': ['UAU', 'UAC'],
'V': ['GUU', 'GUC', 'GUA', 'GUG'],
    'X': ['UAA', 'UGA', 'UAG']  # 'STOP'
}
complement_dict = {
'A':'T', 'T':'A', 'G':'C', 'C':'G'
}
# print("Start codon_dict")
# codon_dict = {}
# for aa, codon_list in codon_dict_inverse.items():
# for codon in codon_list:
# codon_dna = codon.replace('U', 'T')
# codon_dict[codon_dna] = aa
# print(codon_dict)
# print("End codon_dict")
codon_dict = {
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'AGA': 'R', 'AGG': 'R', 'AAT': 'N', 'AAC': 'N', 'GAT': 'D', 'GAC': 'D', 'TGT': 'C',
'TGC': 'C', 'CAA': 'Q', 'CAG': 'Q', 'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', 'CAT': 'H', 'CAC': 'H', 'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'TTA': 'L',
'TTG': 'L', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'AAA': 'K', 'AAG': 'K', 'ATG': 'M', 'TTT': 'F', 'TTC': 'F', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'AGT': 'S', 'AGC': 'S', 'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'TGG': 'W', 'TAT': 'Y', 'TAC': 'Y', 'GTT': 'V', 'GTC': 'V', 'GTA': 'V',
'GTG': 'V', 'TAA': 'X', 'TGA': 'X', 'TAG': 'X'
}
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
def isDNA(genome):
if not isinstance(genome, str) or genome == '':
print("Genome is not a string or empty")
return False
for residue in ['T','C','A','G']:
genome = genome.replace(residue, '')
if genome != '':
print("Genome contains disallowed characters: ", genome)
return False
return True
def getComplementary(sequence):
comp = ''
for s in sequence:
comp += complement_dict[s]
return comp
def getTriplet(genome, index=0, read_direction=1, n=32):
return genome[(index)%n] + genome[(index+read_direction)%n] + genome[(index+2*read_direction)%n]
def getCodon(genome, index=0, read_direction=1, n=32):
return codon_dict[getTriplet(genome, index=index, read_direction=read_direction, n=n)]
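# Illustration (assumed genome): with genome 'ATGCA' (n=5),
# getTriplet(genome, index=4, read_direction=1, n=5) wraps around the circular
# genome and returns 'AAT' (positions 4, 0, 1).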
def get_orfs(genome):
if not isDNA(genome):
raise TypeError('get_orfs requires valid DNA genome String')
orf_by_stop_primary = {}
orf_by_stop_reverse = {}
orf_list = []
n = len(genome)
overhead_0 = ( n )%3
overhead_1 = (n-1)%3
overhead_2 = (n-2)%3
print("\tlen: ", n, ", overhead_0: ", overhead_0, ", overhead_1: ", overhead_1, ", overhead_2: ", overhead_2)
# primary strand from 0
res_a = scan_sequence(genome, start=0, stop=n-overhead_0, step=3, isReverse=False, map_by_stop=orf_by_stop_primary)
print("\t\tFor primary strand from 0 found {} proteins".format(len(res_a)))
# primary strand from 1
res_b = scan_sequence(genome, start=1, stop=n-overhead_1, step=3, isReverse=False, map_by_stop=orf_by_stop_primary)
print("\t\tFor primary strand from 1 found {} proteins".format(len(res_b)))
# primary strand from 2
res_c = scan_sequence(genome, start=2, stop=n-overhead_2, step=3, isReverse=False, map_by_stop=orf_by_stop_primary)
print("\t\tFor primary strand from 2 found {} proteins".format(len(res_c)))
orf_list.extend(orf_by_stop_primary.values())
complementary = getComplementary(genome)
# reversed strand from -1 (last element)
res_d = scan_sequence(complementary, start=n-1, stop=overhead_0-1, step=-3, isReverse=True, map_by_stop=orf_by_stop_reverse)
print("\t\tFor reverse strand from n-1 found {} proteins".format(len(res_d)))
# reversed strand from -2
res_e = scan_sequence(complementary, start=n-2, stop=overhead_1-1, step=-3, isReverse=True, map_by_stop=orf_by_stop_reverse)
print("\t\tFor reverse strand from n-2 found {} proteins".format(len(res_e)))
# reversed strand from -3
res_f = scan_sequence(complementary, start=n-3, stop=overhead_2-1, step=-3, isReverse=True, map_by_stop=orf_by_stop_reverse)
print("\t\tFor reverse strand from n-3 found {} proteins".format(len(res_f)))
orf_list.extend(orf_by_stop_reverse.values())
print("\tNumber of Results: ", len(orf_list))
return orf_list
def scan_sequence(genome, start=0, stop=-1, step=3, isReverse=False, map_by_stop=None):
    # avoid the shared-mutable-default pitfall; all current callers pass map_by_stop explicitly
    if map_by_stop is None:
        map_by_stop = {}
    orf_list = []
isActiveORF = False
startIndex = None
aa_string = ""
aa = ''
n = len(genome)
read_direction = -1 if isReverse else 1
for i in range(start,stop,step):
aa = getCodon(genome, index=i, read_direction=read_direction, n=n)
print("i={}: {} for {}".format(i, aa, getTriplet(genome, index=i, read_direction=read_direction, n=n) ))
if isActiveORF:
if aa == 'X': # 'STOP'
# end of protein sequence
# abs(i-startIndex)+1
if len(aa_string) > 33:
# add ORF to list if long enough
# 4-tuples containing (in this order)
# the position of the first DNA residue,
# the position of the last DNA residue (including stop codon),
# the translated amino acid sequence as a single string, and
# a flag which is True if the ORF is parsed from the reversed strand
endIndex = i-2 if isReverse else i+2
orf = (startIndex, endIndex, aa_string, isReverse)
orf_list.append(orf) # only used for counting
if not endIndex in map_by_stop:
map_by_stop[endIndex] = orf
else:
startIndex2, endIndex2, aa_string2, isReverse2 = map_by_stop[endIndex]
if len(aa_string2) < len(aa_string):
print("Replace \n {} by \n {}".format(map_by_stop[endIndex], orf))
map_by_stop[endIndex] = orf
# reset counter after seeing end codon
isActiveORF = False
startIndex = None
aa_string = ''
else:
aa_string = aa_string + aa
elif aa == 'M': # 'START':
# found start codon
isActiveORF = True
startIndex = i
aa_string = aa
else:
# continue search for start
pass
# scan for circular
start_ext = stop
stop_ext = stop + read_direction*n
if not isActiveORF and getCodon(genome, index=start_ext, read_direction=read_direction, n=n) == 'M':
# start codon is on the fringe between circular repetition
isActiveORF = True
startIndex = start_ext
aa_string = ''
if isActiveORF:
for i_ext in range(start_ext,stop_ext,step):
aa = getCodon(genome, index=i_ext, read_direction=read_direction, n=n)
print("i_ext={} at i={}: {} for {}".format(i_ext,(i_ext)%n, aa, getTriplet(genome, index=i, read_direction=read_direction, n=n)))
if aa == 'X': # 'STOP'
# end of protein sequence
if len(aa_string) > 33:
endIndex = (i_ext%n)-2 if isReverse else (i_ext%n)+2
orf = (startIndex%n, endIndex, aa_string, isReverse)
orf_list.append(orf) # only used for counting
if not endIndex in map_by_stop:
map_by_stop[endIndex] = orf
else:
startIndex2, endIndex2, aa_string2, isReverse2 = map_by_stop[endIndex]
if len(aa_string2) < len(aa_string):
print("Replace \n {} by \n {} \n in circular".format(map_by_stop[endIndex], orf))
map_by_stop[endIndex] = orf
# TODO check if reads over shorter start-end protein sequence
# reset counter after seeing end codon
isActiveORF = False
startIndex = None
aa_string = ''
break
else:
aa_string = aa_string + aa
    return orf_list  # callers mainly rely on map_by_stop (filled by reference); the list is only used for counting
def json_data():
test_json = './tests/orffinder_test_2.json'
relative_path = Path(__file__).parent
with Path(relative_path, test_json).open('r') as json_file:
json_data = json.load(json_file)
return json_data
def genome(json_data):
return json_data['genome']
def invalid_genome(json_data):
return json_data['invalid_genome']
def orf_list_master(json_data):
orf_list_master = [tuple(e) for e in json_data['orf_list']]
orf_list_master = set(orf_list_master)
return orf_list_master
if __name__ == '__main__':
dna = ("" +
"TTACTTGATACTATTGATTAAAGCGGGGACAAAATTTGCAGAATTTAGATAAGAAAAAAC" +
"CCCTGTTATCGGACAGTTTGGCGACCGGTGATAACAAGGGTTTTGCATCTCCCAAAGGAG" +
"ATCAACATAGGGATAGAATAACACGTTTTGGCATTTTGAAACATAGATCGAAGCAACAAG" +
"AAAACTATTTATTTTCGTTAGCTAAGATTAAAGAAAATTATCATGCCGATGTAAAAAACG" +
"ATGAATCTATTCGCGCCATGAAAACTGCCCAAAAATTAAATGGGTGCGGTAATTTTCTTC" +
"TATTCAAAAATTTTTACACCATTAATCAAATTAAACTCGCCAAGTTCCAAGCTTGTAGTG" +
"AGCATTTGTTATGTCCGTTTTGTGCTGGTATTAGAGCTTCTAAGGCAATTCAAAAATACT" +
"CTGAGCGTGTTGATCAAGTCTTATCTGAAAATCCTCGTTTAAAGCCCGTTATGATCACGT" +
"TTACGGTTAAAAATGGGGTAGACCTAGGGGAACGGTTCACCCATCTTATAAAATCGTTTA" +
"GAACGCTTATAGAGCGTCGTAGGGACTATATTAAAAAAGGGCGTGGCTTTAATGAATTTT" +
"GCAAAATTAATGGTGCGATGTATTCATATGAGAATACTTACAATGAAAAAACTAATGAAT" +
"GGCATCCTCATATTCATATGTTTGCACTTTTGGATGATTGGATAGATCAGGATGAATTGT" +
"CTCAATATTGGCAATCCATTACTGGGGACTCTATGGTCGTTGATATTCGTAGAGCCAAAA" +
"AACAAAAAGACTTAGGCTATTCAGGTGCTGCTGCTGAAGTCTGTAAATATGCTCTCAAAT" +
"TTGGTGATCTTTCTGTAGAAAAGACTTGGGAAGCTTTCAAAGTTTTGAAAGGTAAGCGAT" +
"TAAGTGGGGCTTTTGGATCTCTTTGGGGCGTGAAAATTCCTGAATCATTGATAGATGATC" +
"TTCCAGACGATTCTGATTTACCTTATTTAGAAATGATTTATAAGTTCGTCTTTTCTAAGA" +
"AGTCTTATTACGATTTACAACTTACTCGTCATGTCGAACCTACAGGTAAGGACGACGCCG" +
"ACGAGCTTCGAGGAGAAGAAGGACGCAACCTGTTGGTGAGCATGGACGGGCGAGGAGCGA" +
"GCGACGCTGGGAGGGCCCGCACTGGCGCGCTAGCCCCGCAGCACGGACGAAAAAAACAAC" +
"ACTGGCAAATTCCACCAGTTACTCGTGTTCGGGTTCGGAAGCGAATCCGAAGATGGGACG" +
"GATATTTATGTGTCTTACATTTATAGTTTTTTAACTAGTTAATAATACTAGTAAGTCGGT" +
"GAAATCAGGGAGTGCCATCCCCGATTTCACCTAACCACAACGTACTGTCAAGGAGTACTT" +
"ATCATGGCTAACGCGAATACTACTGTTTTAGAACTTTTAGGCAAACAAGTTTCTTTTGTT" +
"TATGTTCTTAAGTCTGATTCAGATGAATATTCTTTCACCTGTTCTGGTGTTGTGACTGAT" +
"GTAATTATCAGTCTTAACTCTGAACCACAGCTCTCTGTTGATAACGGGGATTTTTATATT" +
"TATTCGGATTTAAAGGATTTCTTTATTAAGTCTGAATAAGGGTCTCTTGGTATATTTACG" +
"AAATCTCATGCCACGCTTGAAACTTAAGGGTTTCAGGCGTGTTTTTTTATAGTTTCTTGA" +
"GAAACGCGTAAGCGTGCATTACTGAAAAAACACCTAAATCTTAGAGTGGGTGTCTACCCA" +
"CGAACTGACATAAAACGATTTAAAAACTTTCCTCTCTGATTACAAGTTCCCTTTAGCCTA" +
"AGAACGGGAGTTACCGAGCTTTGCTCGCCGTGATATTGAATAGATTTTAATTTTGATATA" +
"TAAATGGAATCACAAAATGATTCCTTTTATATATAAGGTAAGATTTATGAGCAATCCATC" +
"AAAATTAGATACTCTTGTTGCAGAGATTAGTGGTACTGCTGAATCTGAATATATTGGTCT" +
"TTATAAGCAGATTCCTTTTCGTGTAAGACTTCCCCTTTTTGCACGTTTATCAGCACTTCA" +
"TACCGTCCAAAATTCCCATAATAAAACTCCGCGTAATGCGCTGTTGAATGACTTACTTGA" +
"AATCGCCATGGATCAAGTTATGTCTAAATTAGATGAGGAAACTTTGGAAGGTCTTGGCCA" +
"TTTAGAAGCTCAATATTATGAAGAGCTTTCTCAATATGATTCTGGAGAACTTTCAGATGA" +
"TTAGAAAACCACGAATTATAAGTAACACTCCTATGGAAAGCCTGGATGAAGCATGTACTT" +
"ACTTTAGACAGGTTCTCCAAGAACATTTAGATAAAAAAAGACAAATTACATTTTTTACCA" +
"TTGCTATGGTTGATGGTCGTTTGAAGTTAATTGAGGGGATACCAGAATGATTAAGCTTGA" +
"AGGCACAGTTTTAAATACATACCACTTAGATGGTGGGACTAATAAAAAGGGCGAAGAATA" +
"TGAAGCTCGAGATAAAGTCCAACTTCTAGGTTCGTTGGAGCTTCCCAACGGACAAATTAA" +
"AAATGAATTAATCGACCTTACTGTCGAAGATTCTCGTATTTATGATGATTTCAAAAATAA" +
"GCTTATTAGCATCAGTTGTGGTGCTATGGCTGTTGGTCGTAACGTTATTTTTTATGTTCG" +
"AAAAGGTGCGAAACCTGTTTTAGCAGATCACTTATGATTTTCCCTAAAAAACATCGAGTT" +
"AGCGAAGCGTCTCGATGTTTTTTCTT"
)
#print("\n\nDNA:")
#print(dna)
#n = len(dna)
#dna = genome(json_data())
# print("\nPrimary 0:")
# print(codons_to_aa(dna[0:n-n%3]))
# print("\nPrimary 1:")
# print(codons_to_aa(dna[1:n-(n-1)%3]))
# print("\nPrimary 2:")
# print(codons_to_aa(dna[2:n-(n-2)%3]))
# print("\n")
# print("\nComplementary 0:")
# print(codons_to_aa(getComplementary((dna[::-1])[0:n-n%3])))
# print("\nComplementary 1:")
# print(codons_to_aa(getComplementary((dna[::-1])[1:n-(n-1)%3])))
# print("\nComplementary 2:")
# print(codons_to_aa(getComplementary((dna[::-1])[2:n-(n-2)%3])))
# print("\n\n")
#result = get_orfs(dna)
#print("Result:")
#for res in result:
# print(res)
#print("Number of Results: ", len(result))
# Own test cases
# 'P': ['CCT', 'CCC', 'CCA', 'CCG'],
# 'W': ['TGG'],
# 'M': ['ATG'], # 'START'
# 'X': ['TAA', 'TGA', 'TAG'] # 'STOP'
repeat = 33
forward_first = 'ATG' + 'CCC' * repeat + 'TAA'
forward_edge_start1 = 'G' + 'CCC' * repeat + 'TAA' + 'AT'
forward_edge_start2 = 'TG' + 'CCC' * repeat + 'TAA' + 'A'
forward_circular = 'TAA' + 'ATG' + 'CCC' * repeat
forward_edge_end1 = 'A' + 'ATG' + 'CCC' * repeat + 'TA'
forward_edge_end2 = 'AA' + 'ATG' + 'CCC' * repeat + 'T'
forward = [ forward_first, forward_edge_start1, forward_edge_start2, forward_circular, forward_edge_end1, forward_edge_end2 ]
backward = [getComplementary(x[::-1]) for x in forward]
print("\n")
'''
for idx, f in enumerate(forward):
result = get_orfs(f)
for res in result:
print("\t", res)
print("Result forward at {} for: {}".format(idx, f))
print("\n")
'''
for idx, b in enumerate(backward):
result = get_orfs(b)
for res in result:
print("\t", res)
print("Result backward at {} for: {}".format(idx, b))
print("\n")
# New tests 'orffinder_test_2.json'
'''
{
(3608, 3285, 'MKKRRFTQEDRTLIELQIEKGVARKEAAKAIYEKVYGLKPRDFALIDSLCSSNGEDYRDQAFETIFKRNREKLKRLTIARAARAELKKLPKEEKSTFSWSASIHKRR', True),
(822, 1661, 'MPPPKGRSFPFAPRHSADWLVSHVTYDQAVDMFFNQTATQQNLGHDPLVYSKVFRGVTYATLQEAQQVFTETMNAEYEVREQRDLADENRGCKASKILNDNIRNRIVPTEDGLESLTAYRKPFGEKCVSPLSLFTKSLNGGSNSHIQANETQAQLVTQQAYDFPLVTKGSKAIQALDDSWGLQCSLEDELNQNHQDVVILDLEDSHKLLNNFWVDLGKDTALIATSLNTANGWFQHTTPIFDAKGLVKQFGDINIKADIVESKGKQFIAFSGKKNGKEI', False),
(5138, 623, 'MDMTTAPLVPYKKPYLSSSQLCKKLID<KEY>', False),
(3199, 3441, 'MKVAATSISELSPLSFLMCEQAKRVSWVDYLLLWIDADQLNVLFSSFGNFFNSALAALAMVSRFNFSLFLLKIVSKAWSL', False),
(2071, 2487, 'MRFLGIVFIPLLILLWWIPTNGVLGDYQDLLNQTDEVRLSLITLIVPIGGFIPLLISVALISIAIYSGKQARLVIGEKWTSTINRTCIYSMILGVVFAVVFALYCIKLLDENGYEYSYNLTQITPTGIHLMYVKSHKE', False),
(4269, 4141, 'MSEGISLEHIVAFLKTKNITVSRDYLKTVLYRIRQKRKLENE', True),
(4148, 3783, 'MNKPKSKTHLVSVRVDGRVNAEIEELLNDTHKDKAELLRALLVRGLFAYKRDLYQTHGDRVHHSIDKSYYAYEREMLDDDLHYIATRFDQLEAMIREHKEPVLTPTKKEGKKTLLDSFFKK', True),
(2569, 2318, 'MLKISMINFMILWFIDKKATLLSGFFVSILCVILHTLNEYLSGLFGLGYNCIHNRFHQAILYNIVQIQPQILHLISCYKYTFY', True),
(2584, 2862, 'MPRCEKTKPASLVFSKYLTTFRRRCKSFHDRSYHIRTASISKQSVVGFRVTRVVGQHHAVGFHTIFRPDTGRETSVTSDNAHQMSGLKQELA', False),
(1692, 2069, 'MNGKKYPINSPKVQQVGLSPKARANGFKGAGVLTFVVSAAIATTDLVFKDDYHLVDWFGNVGADMFKALLQFGAGEAILFGIIAMTGYVTLGLIAVFFVYVSIEWIWSEYKVNDEVVKGLESVIS', False),
(3979, 3875, 'MAIECITQLINRITPTKEKCLMMTYITSPLVLTN', True)
}
'''
'''
AssertionError: One or more ORFs are not part of the solution: (51, 623, 'MFVDSKEEYAKHYQEKYYNEYCPFYRDLPPAWVAIELMTFGNVVKLIQNISDDKIQSLKMDRFSKKFNIQKFQTLISWMNVLHQMRNYCGHHNRLFNRNFPAPTAIKKSLSDAIPLVRTKPNPDKREEDQLNRLYTALAALQCIYSGLGFDEKIGPKISDLFDNYTTTQRFSLSMGFPNGWKEEPLFFDL', False)
AssertionError: One or more ORFs are not part of the solution: (51, 623, 'MFVDSKEEYAKHYQEKYYNEYCPFYRDLPPAWVAIELMTFGNVVKLIQNISDDKIQSLKMDRFSKKFNIQKFQTLISWMNVLHQMRNYCGHHNRLFNRNFPAPTAIKKSLSDAIPLVRTKPNPDKREEDQLNRLYTALAALQCIYSGLGFDEKIGPKISDLFDNYTTTQRFSLSMGFPNGWKEEPLFFDL', False)
'''
# Old tests
'''
len: 2726 , overhead_0: 2 , overhead_1: 1 , overhead_2: 0
Result:
(279, 461, 'MGAVIFFYSKIFTPLIKLNSPSSKLVVSICYVRFVLVLELLRQFKNTLSVLIKSYLKILV', False)
(591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
(1383, 1598, 'MANANTTVLELLGKQVSFVYVLKSDSDEYSFTCSGVVTDVIISLNSEPQLSVDNGDFYIYSDLKDFFIKSE', False)
(1876, 2223, 'MIPFIYKVRFMSNPSKLDTLVAEISGTAESEYIGLYKQIPFRVRLPLFARLSALHTVQNSHNKTPRNALLNDLLEIAMDQVMSKLDEETLEGLGHLEAQYYEELSQYDSGELSDD', False)
(2386, 2676, 'MIKLEGTVLNTYHLDGGTNKKGEEYEARDKVQLLGSLELPNGQIKNELIDLTVEDSRIYDDFKNKLISISCGAMAVGRNVIFYVRKGAKPVLADHL', False)
(257, 1285, 'MKTAQKLNGCGNFLLFKNFYTINQIKLAKFQACSEHLLCPFCAGIRASKAIQKYSERVDQVLSENPRLKPVMITFTVKNGVDLGERFTHLIKSFRTLIERRRDYIKKGRGFNEFCKINGAMYSYENTYNEKTNEWHPHIHMFALLDDWIDQDELSQYWQSITGDSMVVDIRRAKKQKDLGYSGAAAEVCKYALKFGDLSVEKTWEAFKVLKGKRLSGAFGSLWGVKIPESLIDDLPDDSDLPYLEMIYKFVFSKKSYYDLQLTRHVEPTGKDDADELRGEEGRNLLVSMDGRGASDAGRARTGALAPQHGRKKQHWQIPPVTRVRVRKRIRRWDGYLCVLHL', False)
(2177, 2389, 'MKSFLNMILENFQMIRKPRIISNTPMESLDEACTYFRQVLQEHLDKKRQITFFTIAMVDGRLKLIEGIPE', False)
Number of Results: 7
'''
'''
Result:
(279, 461, 'MGAVIFFYSKIFTPLIKLNSPSSKLVVSICYVRFVLVLELLRQFKNTLSVLIKSYLKILV', False)
(591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
(1383, 1598, 'MANANTTVLELLGKQVSFVYVLKSDSDEYSFTCSGVVTDVIISLNSEPQLSVDNGDFYIYSDLKDFFIKSE', False)
(2673, 51, 'MIFPKKHRVSEASRCFFFYLILLIKAGTKFAEFR', False)
(1876, 2223, 'MIPFIYKVRFMSNPSKLDTLVAEISGTAESEYIGLYKQIPFRVRLPLFARLSALHTVQNSHNKTPRNALLNDLLEIAMDQVMSKLDEETLEGLGHLEAQYYEELSQYDSGELSDD', False)
(2386, 2676, 'MIKLEGTVLNTYHLDGGTNKKGEEYEARDKVQLLGSLELPNGQIKNELIDLTVEDSRIYDDFKNKLISISCGAMAVGRNVIFYVRKGAKPVLADHL', False)
(257, 1285, 'MKTAQKLNGCGNFLLFKNFYTINQIKLAKFQACSEHLLCPFCAGIRASKAIQKYSERVDQVLSENPRLKPVMITFTVKNGVDLGERFTHLIKSFRTLIERRRDYIKKGRGFNEFCKINGAMYSYENTYNEKTNEWHPHIHMFALLDDWIDQDELSQYWQSITGDSMVVDIRRAKKQKDLGYSGAAAEVCKYALKFGDLSVEKTWEAFKVLKGKRLSGAFGSLWGVKIPESLIDDLPDDSDLPYLEMIYKFVFSKKSYYDLQLTRHVEPTGKDDADELRGEEGRNLLVSMDGRGASDAGRARTGALAPQHGRKKQHWQIPPVTRVRVRKRIRRWDGYLCVLHL', False)
(2177, 2389, 'MKSFLNMILENFQMIRKPRIISNTPMESLDEACTYFRQVLQEHLDKKRQITFFTIAMVDGRLKLIEGIPE', False)
Number of Results: 8
'''
'''
Result:
(279, 461, 'MGAVIFFYSKIFTPLIKLNSPSSKLVVSICYVRFVLVLELLRQFKNTLSVLIKSYLKILV', False)
(591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
(1383, 1598, 'MANANTTVLELLGKQVSFVYVLKSDSDEYSFTCSGVVTDVIISLNSEPQLSVDNGDFYIYSDLKDFFIKSE', False)
(2673, 51, 'MIFPKKHRVSEASRCFFFYLILLIKAGTKFAEFR', False)
(1876, 2223, 'MIPFIYKVRFMSNPSKLDTLVAEISGTAESEYIGLYKQIPFRVRLPLFARLSALHTVQNSHNKTPRNALLNDLLEIAMDQVMSKLDEETLEGLGHLEAQYYEELSQYDSGELSDD', False)
(2386, 2676, 'MIKLEGTVLNTYHLDGGTNKKGEEYEARDKVQLLGSLELPNGQIKNELIDLTVEDSRIYDDFKNKLISISCGAMAVGRNVIFYVRKGAKPVLADHL', False)
(257, 1285, 'MKTAQKLNGCGNFLLFKNFYTINQIKLAKFQACSEHLLCPFCAGIRASKAIQKYSERVDQVLSENPRLKPVMITFTVKNGVDLGERFTHLIKSFRTLIERRRDYIKKGRGFNEFCKINGAMYSYENTYNEKTNEWHPHIHMFALLDDWIDQDELSQYWQSITGDSMVVDIRRAKKQKDLGYSGAAAEVCKYALKFGDLSVEKTWEAFKVLKGKRLSGAFGSLWGVKIPESLIDDLPDDSDLPYLEMIYKFVFSKKSYYDLQLTRHVEPTGKDDADELRGEEGRNLLVSMDGRGASDAGRARTGALAPQHGRKKQHWQIPPVTRVRVRKRIRRWDGYLCVLHL', False)
(2177, 2389, 'MKSFLNMILENFQMIRKPRIISNTPMESLDEACTYFRQVLQEHLDKKRQITFFTIAMVDGRLKLIEGIPE', False)
(2305, 2123, 'MFLENLSKVSTCFIQAFHRSVTYNSWFSNHLKVLQNHIEKALHNIELLNGQDLPKFPHLI', True)
(107, 0, 'MQNPCYHRSPNCPITGVFSYLNSANFVPALINSIK', True)
Number of Results: 10
'''
'''
orf_list_master = [(257, 1285,'MKTAQKLNGCGNFLLFKNFYTINQIKLAKFQACSEHLLCPFCAGIRASKAIQKYSERVDQVLSENPRLKPVMITFTVKNGVDLGERFTHLIKSFRTLIERRRDYIKKGRGFNEFCKINGAMYSYENTYNEKTNEWHPHIHMFALLDDWIDQDELSQYWQSITGDSMVVDIRRAKKQKDLGYSGAAAEVCKYALKFGDLSVEKTWEAFKVLKGKRLSGAFGSLWGVKIPESLIDDLPDDSDLPYLEMIYKFVFSKKSYYDLQLTRHVEPTGKDDADELRGEEGRNLLVSMDGRGASDAGRARTGALAPQHGRKKQHWQIPPVTRVRVRKRIRRWDGYLCVLHL', False),
(279, 461, 'MGAVIFFYSKIFTPLIKLNSPSSKLVVSICYVRFVLVLELLRQFKNTLSVLIKSYLKILV', False),
(591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False),
(1383, 1598, 'MANANTTVLELLGKQVSFVYVLKSDSDEYSFTCSGVVTDVIISLNSEPQLSVDNGDFYIYSDLKDFFIKSE', False),
(1876, 2223, 'MIPFIYKVRFMSNPSKLDTLVAEISGTAESEYIGLYKQIPFRVRLPLFARLSALHTVQNSHNKTPRNALLNDLLEIAMDQVMSKLDEETLEGLGHLEAQYYEELSQYDSGELSDD', False),
(2177, 2389, 'MKSFLNMILENFQMIRKPRIISNTPMESLDEACTYFRQVLQEHLDKKRQITFFTIAMVDGRLKLIEGIPE', False),
(2386, 2676, 'MIKLEGTVLNTYHLDGGTNKKGEEYEARDKVQLLGSLELPNGQIKNELIDLTVEDSRIYDDFKNKLISISCGAMAVGRNVIFYVRKGAKPVLADHL', False),
(2673, 51, 'MIFPKKHRVSEASRCFFFYLILLIKAGTKFAEFR', False),
(107, 0, 'MQNPCYHRSPNCPITGVFSYLNSANFVPALINSIK', True),
(2305, 2123, 'MFLENLSKVSTCFIQAFHRSVTYNSWFSNHLKVLQNHIEKALHNIELLNGQDLPKFPHLI', True)]
'''
'''
Primary 0:
LLDTIDXSGDKICRIXIRKNPCYRTVWRPVITRVLHLPKEINIGIEXHVLAFXNIDRSNKKTIYFRXLRLKKIIMPMXKTMNLFAPXKLPKNXMGAVIFFYSKIFTPLIKLNSPSSKLVVSICYVRFVLVLELLRQFKNTLSVLIKSYLK<KEY>
Primary 1:
<KEY>
Primary 2:
<KEY>
Complementary 0:
<KEY>
Complementary 1:
<KEY>
Complementary 2:
<KEY>
'''
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
if aa=="R" or aa=="H" or aa=="k":
return True
else:
return False
def isBasic(aa):
return isPositivelyCharged(aa)
def isNegativelyCharged(aa):
if aa=="D" or aa=="E":
return True
else:
return False
def isAcid(aa):
return isNegativelyCharged(aa)
def isAromatic(aa):
if aa=="H" or aa=="F" or aa=="W" or aa=="Y":
return True
else:
return False
def isHydrophobic(aa):
if aa=="A" or aa=="I" or aa=="L" or aa=="M" or aa=="F" or aa=="P" or aa=="W"or aa=="V" :
return True
else:
return False
def isPolar(aa):
if aa=="R" or aa=="N" or aa=="D" or aa=="E" or aa=="Q" or aa=="H" or aa=="K" or aa=="S" or aa=="T" or aa=="Y":
return True
else:
return False
def isProline(aa):
if aa=="P":
return True
else:
return False
def containsSulfur(aa):
if aa=="M" or aa=="C" :
return True
else:
return False <file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import re
codon_dict = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',
'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W',
}
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i + 3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
def reverse_complementary(sequence):
return sequence[::-1].translate(str.maketrans("ATGC", "TACG"))
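# Example (assumed input): reverse_complementary('ATGC') == 'GCAT'
# (reverse the strand, then swap each base via str.maketrans).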
def get_orfs(genome):
if not bool(re.match('^[TAGC]+$', genome)):
raise TypeError
frames = []
stop = ["TAA", "TAG", "TGA"]
for (complementary, sequence) in [(False, genome), (True, reverse_complementary(genome))]:
for f1 in range(0, 3):
i = f1
while i < len(sequence):
code = str(sequence[i:i + 3])
if code == 'ATG':
orf = ''
start = i
while code not in stop:
# circular DNA
if i + 3 > len(sequence):
code = str(sequence[i: len(sequence)])
loop = 3 - len(code)
code += str(sequence[0: loop])
i = loop
else:
code = str(sequence[i:i + 3])
i += 3
orf += code
trans = codons_to_aa(orf)
if len(trans) >= 34:
if not complementary:
frames.append((start, i - 1, trans, complementary))
else:
frames.append((len(sequence) - start - 1, len(sequence) - i, trans, complementary))
i -= 3 # set on the correct end
i += 3
frames = list(set(frames))
result = []
for f1 in frames:
biggest = f1
for f2 in frames:
if f1[1] == f2[1] and f1 is not f2:
if len(f2[2]) > len(biggest[2]):
biggest = f2
result.append(biggest)
return result
<file_sep>##############
# Exercise 2.6
##############
import os
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def aa_dist(self, aa_seq):
dist = {}
for aa in aa_seq:
if aa not in dist.keys():
dist[aa] = 0
dist[aa] += 1
for aa in dist:
dist[aa] /= len(aa_seq)
return dist
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return sum([len(s) for s in self.__sequences]) / len(self.__sequences)
def read_fasta(self, path):
with open(path, "r") as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.add_sequence(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip().replace('*', '')
self.add_sequence(seq)
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
frequencies = {}
s = [aa for seq in self.__sequences for aa in seq]
for aa in s:
if aa not in frequencies.keys():
frequencies[aa] = 0
frequencies[aa] += 1
return frequencies
def get_av_frequencies(self):
        # return number of occurrences normalized by length
        total_length = len([aa for seq in self.__sequences for aa in seq])
        norm_freq = self.get_abs_frequencies()
        for aa in norm_freq:
            norm_freq[aa] /= total_length
return norm_freq
def add_sequence(self, seq):
self.__sequences.append(seq)
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
CLEAN_ALPHABET = ALPHABET.replace('-', '')
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) == 0:
raise TypeError()
l = len(sequences[0])
for s in sequences:
if len(s) != l:
raise TypeError()
for c in s:
if c not in ALPHABET:
raise TypeError()
self.sequences = sequences
self.matrix = np.array([list(s) for s in sequences])
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((self.get_size()[1], len(ALPHABET)))
if bg_matrix is None:
freq = [1/len(CLEAN_ALPHABET) for _ in CLEAN_ALPHABET]
freq = np.array(freq)
bg_matrix = np.ones((21, 21)) / 20 / 20
else:
bg_matrix = np.array(bg_matrix)
freq = bg_matrix.sum(axis=0)
weights = None
if use_sequence_weights:
weights = self.get_sequence_weights()
i = -1
for row in self.matrix.T:
if row[0] == '-':
                # drop the PSSM row reserved for this primary-sequence gap column
                pssm = np.delete(pssm, [i + 1], axis=0)
continue
i += 1
if use_sequence_weights:
counts = {a: 0 for a in ALPHABET}
for j in range(len(row)):
c = row[j]
counts[c] += weights[j]
else:
unique, counts = np.unique(row, return_counts=True)
counts = dict(zip(unique, counts))
for k, v in counts.items():
pssm[i][AA_TO_INT[k]] = v
if redistribute_gaps:
gaps = pssm[:, -1]
gaps = gaps[:, np.newaxis].dot(freq[np.newaxis, :])
pssm = pssm[:, :-1]
if redistribute_gaps:
pssm += gaps
if add_pseudocounts:
pseudo = np.zeros_like(pssm)
L = pseudo.shape[0]
N = self.get_number_of_observations()
for i in range(L):
for a in range(20):
val = 0
for j in range(20):
val += pssm[i, j] / freq[j] * bg_matrix[j, a]
pseudo[i, a] = val
alpha = N - 1
pssm *= alpha
pseudo *= beta
pssm += pseudo
pssm /= alpha + beta
row_sums = pssm.sum(axis=1)
pssm = pssm / row_sums[:, np.newaxis]
pssm = pssm / freq[np.newaxis, :]
pssm = np.log2(pssm) * 2
pssm[pssm == -np.inf] = -20
pssm = np.around(pssm)
return pssm.astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
# weights = np.zeros(self.get_size()[0])
arr = np.zeros(self.get_size()).astype(np.float64) # L x S
j = -1
for s in self.matrix.T:
j += 1
r = len(np.unique(s))
if r == 1:
continue
unique, counts = np.unique(s, return_counts=True)
counts = dict(zip(unique, counts))
# for a in ALPHABET:
# counts[a] = counts.get(a, 0)
for i in range(len(s)):
# i = sequence index
# j = residue index
                s_count = counts[self.matrix[i][j]]  # renamed to avoid shadowing the column variable s
                arr[i, j] = np.divide(1, r * s_count, dtype=np.float64)
# weights = np.sum(arr.T, axis=0, dtype=np.float64)
weights = np.array([sum(i) for i in arr])
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = np.float64(0)
# counts = {AA_TO_INT[a]: 0 for a in ALPHABET}
for r in self.matrix.T:
# for c in r:
# counts[AA_TO_INT[c]] += 1
num_obs += len(np.unique(r))
num_obs /= self.get_size()[1]
return num_obs
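# Minimal usage sketch (toy alignment, not part of the exercise):
#   msa = MSA(["SE-AN", "SE-ES", "SEVEN"])
#   msa.get_size()              # -> (3, 5)
#   msa.get_primary_sequence()  # -> "SEAN"
#   msa.get_pssm()              # -> (4 x 20) int64 log-odds matrix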
<file_sep>import numpy as np
UP = "U"
LEFT = "L"
DIAG = "D"
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.direction_matrix = [
[[] for _ in range(len(self.score_matrix[0]))]
for _ in range(len(self.score_matrix))
]
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
self.score_matrix[0, :] = range(0, len(self.string1) + 1)
self.score_matrix[:, 0] = range(0, len(self.string2) + 1)
self.score_matrix *= self.gap_penalty
for row in self.direction_matrix:
row[0].append(UP)
for col in self.direction_matrix[0]:
col.append(LEFT)
# iterate over rest
for row_index, row_letter in enumerate(self.string2):
for column_index, column_letter in enumerate(self.string1):
diag_val = (
self.score_matrix[row_index, column_index]
+ self.substituion_matrix[row_letter][column_letter]
)
left_val = (
self.score_matrix[row_index + 1, column_index] + self.gap_penalty
)
upper_val = (
self.score_matrix[row_index, column_index + 1] + self.gap_penalty
)
max_val = max(diag_val, left_val, upper_val)
self.score_matrix[row_index + 1, column_index + 1] = max_val
if diag_val == max_val:
self.direction_matrix[row_index + 1][column_index + 1].append(DIAG)
if left_val == max_val:
self.direction_matrix[row_index + 1][column_index + 1].append(LEFT)
if upper_val == max_val:
self.direction_matrix[row_index + 1][column_index + 1].append(UP)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1, -1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def backtrack(self, row, column):
if row == 0 and column == 0:
return [("", "")]
if row == 0:
return [("", "-" * column)]
if column == 0:
return [("-" * row, "")]
alignments = []
for direction in self.direction_matrix[row][column]:
if direction == DIAG:
_alignments = self.backtrack(row - 1, column - 1)
for alignment in _alignments:
s2 = alignment[0] + self.string2[row - 1]
s1 = alignment[1] + self.string1[column - 1]
alignments += [(s2, s1)]
if direction == UP:
_alignments = self.backtrack(row - 1, column)
for alignment in _alignments:
s2 = alignment[0] + self.string2[row - 1]
s1 = alignment[1] + "-"
alignments += [(s2, s1)]
if direction == LEFT:
_alignments = self.backtrack(row, column - 1)
for alignment in _alignments:
s2 = alignment[0] + "-"
s1 = alignment[1] + self.string1[column - 1]
alignments += [(s2, s1)]
return alignments
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
row = len(self.string2)
column = len(self.string1)
alignments = self.backtrack(row, column)
return [(y, x) for x, y in alignments]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
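# Minimal sketch (not part of the exercise): run Needleman-Wunsch on two tiny
# strings with a toy +1/-1 substitution matrix to see the machinery work.
if __name__ == "__main__":
    toy_matrix = {a: {b: (1 if a == b else -1) for b in "GAT"} for a in "GAT"}
    ga = GlobalAlignment("GAT", "GT", -2, toy_matrix)
    print(ga.get_best_score())  # -> 0
    print(ga.get_alignments())  # -> [('GAT', 'G-T')]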
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
# Check sequence count
if len(sequences) == 0:
raise TypeError('Sequence list must contain at least one element!')
# Check whether all sequences have same length
seq_len = len(sequences[0])
for s in sequences:
if len(s) != seq_len:
raise TypeError('All sequences must have the same length!')
# Check if string contains invalid characters
for char in s:
if char.upper() not in ALPHABET:
raise TypeError('Sequence contains invalid character!')
self.sequences = sequences
# Number of rows
self.num_sequences = self.get_size()[0]
# Number of columns
self.num_chars = self.get_size()[1]
self.r = self.calculate_r()
self.weights = self.get_sequence_weights()
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((self.num_chars, len(ALPHABET) - 1))
gaps = np.zeros(self.num_chars)
if bg_matrix is not None:
bg_freqs = self.get_bg_freqs(bg_matrix)
for i in range(0, self.num_chars):
# Count AA occurences
for j, seq in enumerate(self.sequences):
x = AA_TO_INT[seq[i]]
if x != GAP_INDEX:
if use_sequence_weights:
pssm[i][x] += self.weights[j]
else:
pssm[i][x] += 1
else:
if use_sequence_weights:
gaps[i] += self.weights[j]
else:
gaps[i] += 1
freqs = pssm
# Redistribute gaps
if redistribute_gaps:
for i, g in enumerate(gaps):
if bg_matrix is None:
gap_weight = g / (len(ALPHABET) - 1)
pssm[i] += gap_weight
else:
for j, aa in enumerate(pssm[i]):
gap_weight = g * bg_freqs[INT_TO_AA[j]]
pssm[i][j] += gap_weight
# Add pseudocounts
if add_pseudocounts:
alpha = self.get_number_of_observations() - 1
pseudocounts = np.zeros((self.num_chars, len(ALPHABET) - 1))
if bg_matrix is None:
for i in range(0, self.num_chars):
for j in range(0, len(ALPHABET) - 1):
# if pssm[i][j] != 0:
# for k in range(0, len(ALPHABET) - 1):
# pseudocounts[i][k] += pssm[i][j] / 0.05 * (0.05 * 0.05)
g = 0
for k in range(0, len(ALPHABET) - 1):
g += (pssm[i][k] / 0.05) * (0.05 * 0.05)
pseudocounts[i][j] = g
pseudocounts[i] *= beta
pseudocounts[i] += alpha * pssm[i]
pseudocounts[i] /= alpha + beta
pssm = pseudocounts
else:
for i in range(0, self.num_chars):
for j in range(0, len(ALPHABET) - 1):
g = 0
for k in range(0, len(ALPHABET) - 1):
g += (pssm[i][k] / bg_freqs[INT_TO_AA[k]]) * (bg_matrix[j][k])
pseudocounts[i][j] = g
pseudocounts[i] *= beta
pseudocounts[i] += alpha * pssm[i]
pseudocounts[i] /= alpha + beta
pssm = pseudocounts
for i in range(0, self.num_chars):
norm = sum(pssm[i])
pssm[i] /= norm
if bg_matrix is None:
pssm[i] /= 1 / (len(ALPHABET) - 1)
else:
for j, aa in enumerate(pssm[i]):
pssm[i][j] /= bg_freqs[INT_TO_AA[j]]
for j in range(0, len(ALPHABET) - 1):
if pssm[i][j] == 0:
pssm[i][j] = -20
else:
pssm[i][j] = 2 * np.log2(pssm[i][j])
# Get primary strand gaps and remove them
gap_rows = []
for i, c in enumerate(self.sequences[0]):
if c == '-':
gap_rows.append(i)
for g in gap_rows[::-1]:
pssm = np.delete(pssm, g, axis=0)
x = np.rint(pssm).astype(np.int64)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return len(self.sequences), len(self.sequences[0])
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.remove_gaps(self.sequences[0])
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.num_sequences)
table = np.zeros((self.num_sequences, self.num_chars))
s = np.zeros((self.num_sequences, self.num_chars))
for i in range(0, self.num_sequences):
for j in range(0, self.num_chars):
char = self.sequences[i][j]
count = 0
for k in range(0, self.num_sequences):
if self.sequences[k][j] == char:
count += 1
s[i][j] = count
table[i][j] = 1 / (self.r[j] * s[i][j])
for i in range(0, self.num_sequences):
            total = 0
            for j in range(0, self.num_chars):
                if self.r[j] != 1:
                    total += table[i][j]
            weights[i] = total
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return sum(self.r) / self.num_chars
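    # The estimate is N = (1/L) * sum_i r_i, where r_i is the number of
    # distinct characters observed in MSA column i (see calculate_r below).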
def calculate_r(self):
r = np.zeros(self.num_chars)
for i in range(0, self.num_chars):
ret = ''
for ch in self.sequences:
ret += ch[i]
            r[i] = len(set(ret))
return r
def remove_gaps(self, sequence):
return sequence.replace('-', '')
def get_bg_freqs(self, bg_matrix):
ret = {}
for c in ALPHABET:
if c != '-':
ret[c] = sum(bg_matrix[AA_TO_INT[c]])
return ret
<file_sep>import numpy as np
import re
import math
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.msa_seq_list = []
self.size = () # msa: nr rows, nr cols
self.check_validity(sequences)
self.abs_freq_gaps = np.zeros((self.size[1], 21), dtype=np.float64)
self.abs_freq = np.zeros((self.size[1], 20), dtype=np.float64)
self.calc_abs_freq()
self.observed_aa_nr = np.count_nonzero(self.abs_freq_gaps, 1)
self.primary_seq = re.sub('[-]', '', self.msa_seq_list[0])
self.weight = np.zeros((self.size[0]), dtype=np.float64)
self.all_weights = np.zeros((self.size[0], self.size[1]), dtype=np.float64)
self.calc_weight()
def calc_aa_score(self, rel_freq, bg_freq):
aa_score = np.zeros((self.size[1], 20), dtype=np.float64)
for foo in range(self.size[1]):
for bar in range(20):
if rel_freq[foo][bar] == 0:
aa_score[foo][bar] = np.NINF
else:
bg = bg_freq[bar]
aa_score[foo][bar] = 2* math.log(rel_freq[foo][bar]/bg, 2)
return aa_score
def calc_weight(self):
#get number of different AAs in MSA column (r)
for foo in range(self.size[1]):
for bar in range(self.size[0]):
r = self.observed_aa_nr[foo]
index_large = AA_TO_INT[self.msa_seq_list[bar][foo]]
s = self.abs_freq_gaps[foo][index_large]
if r > 1 and s > 0:
self.all_weights[bar][foo] = 1/(r*s)
self.weight = np.sum(self.all_weights, 1, dtype=np.float64)
def calc_rel_freq(self, matrix):
sums_rows = np.zeros((self.size[1]), dtype=np.float64)
np.sum(matrix, axis=1, dtype=np.float64, out=sums_rows)
rel_freq = np.zeros((self.size[1], 20), dtype=np.float64)
for foo in range(self.size[1]):
for bar in range(20):
rel_freq[foo][bar] = matrix[foo][bar]/sums_rows[foo]
#print(self.rel_freq)
return rel_freq
def calc_abs_freq(self):
for foo in range(self.size[1]):
count = {'A': 0, 'R': 0, 'N': 0, 'D': 0, 'C': 0, 'Q': 0, 'E': 0, 'G': 0, 'H': 0, 'I': 0, 'L': 0, 'K': 0,
'M': 0, 'F': 0, 'P': 0, 'S': 0, 'T': 0, 'W': 0, 'Y': 0, 'V': 0, '-': 0}
for bar in range(self.size[0]):
aa = self.msa_seq_list[bar][foo]
count[aa] = count[aa] + 1
for amino_a in count:
if not amino_a == '-':
self.abs_freq[foo][AA_TO_INT[amino_a]] = count[amino_a]
self.abs_freq_gaps[foo][AA_TO_INT[amino_a]] = count[amino_a]
def check_validity(self, sequences):
aa_list = 'ARNDCQEGHILKMFPSTWYV-'
# check not empty
if len(sequences) >= 1:
length = len(sequences[0])
self.size = (len(sequences), length)
# check all lenths same
for foo in range(len(sequences)):
if len(sequences[foo]) == length:
# check only valid AAs
for bar in range(length):
if sequences[foo][bar] in aa_list:
# store MSA sequence list
self.msa_seq_list = sequences
else:
raise TypeError("amino acid not valid")
else:
raise TypeError("not all sequences have same length")
else:
raise TypeError("not enough sequences")
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if use_sequence_weights:
matrix1 = self.get_weight_count()
else:
matrix1 = self.abs_freq_gaps
        if bg_matrix is None:
bg_matrix = np.full((20, 20), 0.0025)
bg_freq_matrix = np.sum(bg_matrix, 1, dtype=np.float64)
if redistribute_gaps:
pssm = self.redistribute_bg(bg_freq_matrix, matrix1)
else:
pssm = np.delete(matrix1, -1, axis=1)
pssm = pssm.astype(np.float64)
if add_pseudocounts:
pssm = self.add_pseudocounts(pssm, bg_freq_matrix, bg_matrix, beta)
pssm = self.calc_rel_freq(pssm)
pssm = self.calc_aa_score(pssm, bg_freq_matrix)
pssm = self.remove_gap_rows(pssm)
pssm = np.where(pssm == np.NINF, -20, pssm)
return np.rint(pssm).astype(np.int64)
def remove_gap_rows(self, pssm):
res = pssm
for foo in range(self.size[1]-1, -1, -1):
if self.msa_seq_list[0][foo] == '-':
#remove row foo from pssm
res = np.delete(res, foo, axis = 0)
return res
def add_pseudocounts(self, pssm, bg_freq_matrix, sub_freq, beta):
all_sums = np.zeros((self.size[1], 20), dtype=np.float64)
alpha = self.get_number_of_observations()-1
adjusted_freq = np.zeros((self.size[1], 20), dtype=np.float64)
for i in range(self.size[1]):
for a in range(20):
sum_j = 0.0
for j in range(20):
q = sub_freq[j][a]
p = bg_freq_matrix[j]
f = pssm[i][j]
sum_j += (f/p)*q
all_sums[i][a] = sum_j
for foo in range(self.size[1]):
for bar in range(20):
adjusted_freq[foo][bar] = ((alpha*pssm[foo][bar]) + (beta * all_sums[foo][bar]))/(alpha+beta)
return adjusted_freq
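    # Pseudocount scheme used above: g_{i,a} = sum_j (f_{i,j} / p_j) * q_{j,a},
    # then F_{i,a} = (alpha * f_{i,a} + beta * g_{i,a}) / (alpha + beta)
    # with alpha = N - 1, N being the number of independent observations.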
def redistribute_bg(self, bg_matrix, matrix1):
#for each in size[1], add (bg_freq or 0.05)* nr of gaps
res = np.zeros((self.size[1],20), dtype=np.float64)
for foo in range(self.size[1]):
gap_count = matrix1[foo][20]
if gap_count > 0:
for bar in range(20):
count = gap_count * bg_matrix[bar]
res[foo][bar] = matrix1[foo][bar] + count
else:
for bar in range(20):
res[foo][bar] = matrix1[foo][bar]
return res
def get_weight_count(self):
weight_counts = np.zeros((self.size[1], 21), dtype=np.float64)
for foo in range(self.size[1]):
for bar in range(self.size[0]):
weight_counts[foo][AA_TO_INT[self.msa_seq_list[bar][foo]]] += self.weight[bar]
#print(weight_counts)
return weight_counts
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return self.size
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.primary_seq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weight
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
# N = 1/L *
r_sum = np.sum(self.observed_aa_nr, 0)
num_obs = (1/self.size[1])*r_sum
return num_obs.astype(np.float64)
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.storage = []
self.wordStorage = []
self.words = set()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.storage.append(sequence)
self.wordStorage.append([sequence[i:i+3] for i in range(len(sequence)) if i<len(sequence)-2])
self.words = self.words | set(self.wordStorage[-1])
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return [x for x in self.storage if word in x]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        return (
            len(self.storage),
            len(self.words),
            int(round(np.mean([len(set(wl)) for wl in self.wordStorage]))),
            int(round(np.mean([len([x for x in self.storage if w in x]) for w in self.words]))),
        )
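# Minimal usage sketch (toy sequence, not part of the exercise):
#   db = BlastDb()
#   db.add_sequence("MGPRARPAFL")
#   db.get_sequences("RAR")  # -> ["MGPRARPAFL"]
#   db.get_db_stats()        # -> (#sequences, #words, words/sequence, sequences/word)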
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix=substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
ret = set()
if sequence is not None:
cid= []
for i in sequence:
cid.append(ALPHABET.index(i))
for x in range(len(ALPHABET)):
for y in range(len(ALPHABET)):
for z in range(len(ALPHABET)):
for i in range(len(cid)-2):
tmp = self.calc_score_sequence(ALPHABET[x]+ALPHABET[y]+ALPHABET[z], sequence, i)
if tmp >= T:
ret.add(ALPHABET[x]+ALPHABET[y]+ALPHABET[z])
if pssm is not None:
for x in range(len(ALPHABET)):
for y in range(len(ALPHABET)):
for z in range(len(ALPHABET)):
for i in range(len(pssm)-2):
                            tmp = self.calc_score_pssm(ALPHABET[x]+ALPHABET[y]+ALPHABET[z], pssm, i)
if tmp >= T:
ret.add(ALPHABET[x]+ALPHABET[y]+ALPHABET[z])
return list(ret)
def get_words_withStart(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
ret = set()
if sequence is not None:
cid= []
for i in sequence:
cid.append(ALPHABET.index(i))
for x in range(len(ALPHABET)):
for y in range(len(ALPHABET)):
for z in range(len(ALPHABET)):
for i in range(len(cid)-2):
tmp = self.calc_score_sequence(ALPHABET[x]+ALPHABET[y]+ALPHABET[z], sequence, i)
if tmp >= T:
ret.add((ALPHABET[x]+ALPHABET[y]+ALPHABET[z], i))
if pssm is not None:
for x in range(len(ALPHABET)):
for y in range(len(ALPHABET)):
for z in range(len(ALPHABET)):
for i in range(len(pssm)-2):
tmp = pssm[i][x]+ pssm[i+1][y] + pssm[i+2][z]
if tmp >= T:
ret.add((ALPHABET[x]+ALPHABET[y]+ALPHABET[z], i))
return list(ret)
def calc_score_pssm(self, word, pssm, start):
score = 0
for i in range(len(word)):
score = score + pssm[i+start][ALPHABET.index(word[i])]
return score
def calc_score_sequence(self, word, sequence, start):
score = 0
for i in range(len(word)):
score = score + self.substitution_matrix[ALPHABET.index(word[i])][ALPHABET.index(sequence[start+i])]
return score
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
if query is not None:
words = self.get_words_withStart(sequence=query, T=T)
for w in words:
seq=blast_db.get_sequences(w[0])
for s in seq:
startids = [x for x in range(len(s)-2) if s[x:x+3]==w[0]]
for id in startids:
score = self.calc_score_sequence(w[0], query, w[1])
tmp_score=score
tmp_addit = 0
addit = 0
while tmp_addit+3+id<len(s) and tmp_addit+3+w[1]<len(query) and tmp_score>score-X:
tmp_addit=tmp_addit+1
tmp_score = tmp_score + self.calc_score_sequence(str(s[2+id+tmp_addit]), query, w[1]+2+tmp_addit)
if tmp_score > score:
score = tmp_score
addit=tmp_addit
tmp_sub = 0
sub = 0
tmp_score=score
while id-tmp_sub>0 and w[1]-tmp_sub>0 and tmp_score>score-X:
tmp_sub=tmp_sub+1
tmp_score = tmp_score + self.calc_score_sequence(str(s[id-tmp_sub]), query, w[1]-tmp_sub)
if tmp_score > score:
score = tmp_score
sub=tmp_sub
if score>=S:
if s not in d.keys():
d[s] = set()
d[s].add((w[1]-sub, id-sub, 3+sub+addit, score))
if pssm is not None:
words = self.get_words_withStart(pssm=pssm, T=T)
for w in words:
seq=blast_db.get_sequences(w[0])
for s in seq:
startids = [x for x in range(len(s)-2) if s[x:x+3]==w[0]]
for id in startids:
score = self.calc_score_pssm(w[0], pssm, w[1])
tmp_score=score
tmp_addit = 0
addit = 0
while tmp_addit+3+id<len(s) and tmp_addit+3+w[1]<len(pssm) and tmp_score>score-X:
tmp_addit=tmp_addit+1
tmp_score = self.calc_score_pssm(s[id:3+id+tmp_addit], pssm, w[1])
if tmp_score > score:
score = tmp_score
addit=tmp_addit
tmp_sub = 0
sub = 0
tmp_score=score
while id-tmp_sub>0 and w[1]-tmp_sub>0 and tmp_score>score-X:
tmp_sub=tmp_sub+1
tmp_score = self.calc_score_pssm(s[id-tmp_sub:id+3+addit], pssm, w[1]-tmp_sub)
if tmp_score > score:
score = tmp_score
sub=tmp_sub
if score>=S:
if s not in d.keys():
d[s] = set()
d[s].add((w[1]-sub, id-sub, 3+sub+addit, score))
for k in d.keys():
d[k] = list(d[k])
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        # placeholder stub from the template; the two-hit extension is not implemented
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
import os
from Bio.PDB.MMCIFParser import MMCIFParser
#from Bio.PDB.PDBList import PDBList # only required if download desired
from Bio.PDB.Polypeptide import is_aa
from Bio.PDB.Polypeptide import three_to_one
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser() # parser object for reading in structure in CIF format
def __init__( self, path, d_type=np.float32 ):
self.struct_path = path
self.identifier = os.path.split( path )[1].replace('.cif','').replace('.pdb','')
self.structure = self.set_structure()
self.dtype = d_type # save all numpy arrays only as int to make assert comparison easier/more robust
def set_structure( self ):
# go to PDB, download structure as CIF (default) and parse it
#structure_path = self.CIF_DOWNLOADER.retrieve_pdb_file( self.identifier )
return self.CIF_PARSER.get_structure( self.identifier, self.struct_path )
def get_structure( self ):
return self.structure
def get_number_of_chains( self ):
# structure -> model -> chains -> residues
return len( self.structure[0] )
def check_chain_length( self ):
# Check whether all chains in a given PDB structure have the same length
        model = self.structure[0] # crystal structures usually have only one model; NMR structures have more
chain_length = set()
for chain in model:
chain_length.add( self.get_number_of_residues( chain.get_id() ) )
        return len( chain_length ) <= 1
def get_number_of_residues( self, chain_id ):
        # returns the number of residues in a given chain
return len( self.get_sequence( chain_id ) )
def get_sequence( self, chain_id ):
# extract every residue name (three letters) from a given chain in a PDB structure
# return sequence as one-letter-code
chain = self.structure[0][chain_id]
return ''.join( [ three_to_one(residue.get_resname())
for residue in chain if is_aa(residue) ])
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
# returns the CA distance between two residues of a given chain and a given index
# for easier testing -> round to int
distance = ( self.structure[0][chain_id_1][index_1]['CA'] -
self.structure[0][chain_id_2][index_2]['CA'] )
return int( distance )
def get_residue_name( self, chain_id, index ):
# returns the name of a residue in a given chain at a given index. Has 3-letter annotation, e.g. GLU
return self.structure[0][chain_id][index].get_resname()
def get_number_of_water_molecules( self, chain_id ):
chain = self.structure[0][chain_id]
n_waters = 0
for residue in chain:
if 'HOH' in residue.get_resname():
n_waters += 1
return n_waters
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
length = self.get_number_of_residues( chain_id )
contact_map = np.empty( (length, length), dtype=self.dtype )
contact_map[:] = np.nan # initialize as nan
chain = self.structure[0][chain_id]
for i, residue_1 in enumerate( chain ):
for j, residue_2 in enumerate( chain ):
# create only lower triangle and diagonale of contact map as it is symmetric
# check whether current residue is an AA. Skip e.g. water-molecules
if i <= j and is_aa(residue_1) and is_aa(residue_2):
ca_dist = residue_1['CA'] - residue_2['CA']
contact_map[i,j] = ca_dist
contact_map[j,i] = ca_dist
        return contact_map.astype( int ) # return as int to make comparison more robust
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Read in B-Factors from PDB files.
B-Factor for a residue is averaged over the individual B-Factors of
its atoms.
If no B-Factor could be read (e.g. because a residue was not resolved)
np.nan is inserted.
'''
chain = self.structure[0][chain_id]
length = self.get_number_of_residues( chain_id )
b_factors = np.empty( length, dtype=self.dtype )
b_factors[:] = np.nan # initialize all values with NAN
for index, residue in enumerate( chain ): # for each residue in a chain
if is_aa( residue ): # process only AAs; skip waters etc
                avg_bfactor = 0
                n_atoms = 0  # residues without atoms would otherwise leave n_atoms undefined
                for n_atoms, atom in enumerate( residue, 1 ): # for each atom in a residue
avg_bfactor += atom.get_bfactor() # sum up b_factors for all atoms in a residue
try:
avg_bfactor /= n_atoms # average b_factor for a residue: Divide by number of atoms
except ZeroDivisionError:
avg_bfactor = np.nan
b_factors[ index ] = avg_bfactor
avg_bval = np.nanmean( b_factors ) # calc. mean, neglecting NAN/missing values
        std_bval = np.nanstd( b_factors ) # calc. stdev., neglecting NAN/missing values
normalized_b_values = ( b_factors - avg_bval ) / std_bval
        return normalized_b_values.astype( int ) # return as int to make comparison more robust
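# Note on get_bfactors: per-residue B-factors are z-score normalized over the
# chain, i.e. (b_i - mean(b)) / std(b), with NaNs ignored via np.nanmean and
# np.nanstd, and finally truncated to int for robust test comparison.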
def main():
print('PDB parser class.')
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
import re
from typing import List, Mapping, Set
codon_dict = {
"CTT": "L",
"TAG": "*",
"ACA": "T",
"ACG": "T",
"ATC": "I",
"AAC": "N",
"ATA": "I",
"AGG": "R",
"CCT": "P",
"ACT": "T",
"AGC": "S",
"AAG": "K",
"AGA": "R",
"CAT": "H",
"AAT": "N",
"ATT": "I",
"CTG": "L",
"CTA": "L",
"CTC": "L",
"CAC": "H",
"AAA": "K",
"CCG": "P",
"AGT": "S",
"CCA": "P",
"CAA": "Q",
"CCC": "P",
"TAT": "Y",
"GGT": "G",
"TGT": "C",
"CGA": "R",
"CAG": "Q",
"TCT": "S",
"GAT": "D",
"CGG": "R",
"TTT": "F",
"TGC": "C",
"GGG": "G",
"TGA": "*",
"GGA": "G",
"TGG": "W",
"GGC": "G",
"TAC": "Y",
"TTC": "F",
"TCG": "S",
"TTA": "L",
"TTG": "L",
"TCC": "S",
"ACC": "T",
"TAA": "*",
"GCA": "A",
"GTA": "V",
"GCC": "A",
"GTC": "V",
"GCG": "A",
"GTG": "V",
"GAG": "E",
"GTT": "V",
"GCT": "A",
"GAC": "D",
"CGT": "R",
"GAA": "E",
"TCA": "S",
"ATG": "M",
"CGC": "R",
}
START_CODON = "ATG"
ENDCODONS = ["TAG", "TAA", "TGA"]
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i : i + 3] for i in range(0, len(orf), 3)]
aa_seq = "".join(codon_dict[c] for c in codons)
return aa_seq
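# Illustrative check: codons_to_aa("ATGAAATAG") returns "MK*" -- in this
# variant of codon_dict the stop codons map to '*' rather than ''.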
def validate(dna_sequence: str):
"""Validate a DNA sequence.
Args:
dna_sequence: string representing a DNA sequence
Raises:
TypeError if dna_sequence is not a string, or contains letters that are not in the set {ATGC}.
"""
if (
not isinstance(dna_sequence, str)
or len(
dna_sequence.replace("A", "")
.replace("T", "")
.replace("G", "")
.replace("C", "")
)
!= 0
):
raise TypeError()
def find_starting_positions(genome) -> List[int]:
    matches = re.finditer(START_CODON, genome)  # avoid shadowing the builtin iter
    indices = [m.start(0) for m in matches]
    return indices
def find_end_positions(genome) -> List[int]:
    matches = re.finditer("|".join(ENDCODONS), genome)
    indices = [m.start(0) for m in matches]
    return indices
def positions_to_phase(position_list: List[int]) -> Mapping[int, List[int]]:
phases = list(map(lambda x: x % 3, position_list))
phase_map = {0: list(), 1: list(), 2: list()}
for position, phase in zip(position_list, phases):
phase_map[phase].append(position)
return phase_map
def find_longest_matches(LEN_GENOME, end_positions_phases, starting_positions_phases):
end_position_2_start_position = {}
for phase in [0, 1, 2]:
possible_end_positions = end_positions_phases[phase]
if not possible_end_positions:
continue
for starting_position in starting_positions_phases[phase]:
            # the end is one round-trip away because the starting position is higher than the highest end
if max(possible_end_positions) < starting_position:
# but don't forget the mod 3 of the lengths
next_phase = (phase + (LEN_GENOME % 3) + 2) % 3
end_position = end_positions_phases[next_phase][0]
else:
                # find the first item that is bigger than the start position
for end_position in possible_end_positions:
if end_position > starting_position:
break
# if endposition is set already look which sequence is longer
# take wrap around into account
if end_position in end_position_2_start_position:
curr_diff = (
end_position
- end_position_2_start_position[end_position]
+ LEN_GENOME
) % LEN_GENOME
new_diff = (end_position - starting_position + LEN_GENOME) % LEN_GENOME
if new_diff > curr_diff:
end_position_2_start_position[end_position] = starting_position
else:
# otherwise just update it
end_position_2_start_position[end_position] = starting_position
return end_position_2_start_position
def complementary(string):
replacement_dict = {"A": "T", "T": "A", "C": "G", "G": "C"}
# https://stackoverflow.com/questions/6116978/how-to-replace-multiple-substrings-of-a-string
rep = dict((re.escape(k), v) for k, v in replacement_dict.items())
pattern = re.compile("|".join(rep.keys()))
return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)
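# Illustrative check: complementary("ATGC") returns "TACG" (per-base
# substitution only; callers reverse the string separately for the reverse
# complement, e.g. complementary(genome)[::-1]).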
def get_orfs_inner(genome):
LEN_GENOME = len(genome)
starting_positions = find_starting_positions(
genome + genome[0:2]
) # todo handle edge case start stop is in the middle
starting_positions_phases = positions_to_phase(starting_positions)
end_positions = find_end_positions(genome + genome[0:2])
end_positions_phases = positions_to_phase(end_positions)
print("starting_positions", starting_positions)
print("starting_positions_phases", starting_positions_phases)
print("endpositions_phases", end_positions_phases)
return find_longest_matches(
LEN_GENOME, end_positions_phases, starting_positions_phases
)
def dict_to_sequence(genome, start, end):
if start < end:
return genome[start:end]
else:
return genome[start:] + genome[:end]
def construct_tuple(start, end, genome, right_direction):
seq = codons_to_aa(
dict_to_sequence(genome, start, end)
) # include the whole last triple
    if seq is None:
        # ORF length was not a multiple of three; treat as invalid
        raise ValueError()
    if len(seq) <= 33:
raise ValueError()
end += 2
if not right_direction:
start = len(genome) - start - 1
end = len(genome) - end - 1
return (start, end, seq, not right_direction)
def get_orfs(genome):
validate(genome)
end_position_2_start_position_right_way = get_orfs_inner(genome)
left_genome = complementary(genome)[::-1]
end_position_2_start_position_left_way = get_orfs_inner(left_genome)
orf_list = []
for end, start in end_position_2_start_position_right_way.items():
try:
orf_list.append(construct_tuple(start, end, genome, True))
except ValueError:
pass
for end, start in end_position_2_start_position_left_way.items():
try:
orf_list.append(construct_tuple(start, end, left_genome, False))
except ValueError:
pass
return orf_list
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return sum([len(i) for i in self.__sequences]) / self.get_counts()
def read_fasta(self, filename):
with open(filename, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.__sequences.append(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip().replace("*", "")
self.__sequences.append(seq)
def get_abs_frequencies(self):
counted = Counter(''.join(self.__sequences))
return counted
def get_av_frequencies(self):
counted = self.get_abs_frequencies()
for key in counted:
counted[key] /= len(''.join(self.__sequences))
return counted
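# Minimal usage sketch (hypothetical file path, not part of the exercise):
# this Counter-based variant returns collections.Counter objects, e.g.
#   AADist("tests/sample.fasta").get_abs_frequencies()["A"]
# gives the absolute count of alanine across all read sequences.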
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
# codon to amino acid
codon_dict = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',
'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W',
}
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
        return None
codons = [orf[i:i + 3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
# transfer the genome to the complementary strand
def complementary(sequence):
    mapping = {"T": "A", "A": "T", "C": "G", "G": "C"}
    return "".join(mapping[c] for c in sequence)
def get_orfs_from_string(genome):
orfs = []
start = {}
stop = {}
start_codons = {"ATG"}
stop_codons = {"TAA", "TAG", "TGA"}
# get 3 strand and start stop codon index, store the index list into a dictionary
for s in range(0, 3, 1):
codon_list = []
for i in range(s, len(genome), 3):
codon_list.append(genome[i:i + 3])
start_index = [i * 3 + s for i, v in enumerate(codon_list) if v in start_codons]
stop_index = [i * 3 + 2 + s for i, v in enumerate(codon_list) if v in stop_codons]
start[str(s)] = start_index
stop[str(s)] = stop_index
    pre_dict = {}  # store the potential combined start indices (for the circular wrap-around)
for s in range(0, 3, 1):
start_index = start[str(s)]
stop_index = stop[str(s)]
resi = (3 - (len(genome) - s) % 3) % 3
stop_resi = stop[str(resi)] # in case of circle
current_stop_index = 0
# for the first index, just add to pre_dict if the first start index is smaller than the first stop index
if (start_index[0]<stop_index[0]):
if (str(s) in pre_dict.keys()):
pre_dict[str(s)].append(start_index[0])
else:
pre_dict[str(s)] = [start_index[0]]
# skip check first stop index
for sa in start_index:
# if the start index is bigger than current stop index, we should move current stop index to find next stop index
if sa > stop_index[current_stop_index]:
for i in range(current_stop_index, len(stop_index)):
# if the stop index is greater than start index, then assume we find the orf, put the current index here
if stop_index[i] > sa:
current_stop_index = i
# check whether the translated orf is greater than 33
protein = codons_to_aa(genome[sa:stop_index[i] - 2])
if (len(protein)>33):
orfs.append((sa, stop_index[i], codons_to_aa(genome[sa:stop_index[i] - 2]), False))
break
circle = [v for v in start_index if v >= stop_index[-1]]
if len(circle)>0:
sa = circle[0]
r = stop_resi[0]
if(str(resi) in pre_dict.keys()):
pre_dict[str(resi)].append(sa)
else:pre_dict[str(resi)]=[sa]
# check the start index to deal with overlap in cyclic genome
for k in pre_dict.keys():
max_len = 0
current_max = 0
pre = pre_dict[k]
length = 0
for i in pre:
if i > stop[k][0]:length = len(genome) - i + stop[k][0]
else:length = stop[k][0] - i
if length > max_len:
current_max = i
max_len = length
if current_max>stop[k][0]:
# which means the start index is before the start point
protein = codons_to_aa(genome[current_max:] + genome[:(stop[k][0] - 2)])
if len(protein)>33:orfs.append((current_max, stop[k][0], codons_to_aa(genome[current_max:]+genome[:(stop[k][0] - 2)]), False))
else:
# which means the start index is after the start point
protein = codons_to_aa(genome[current_max:stop[k][0] - 2])
if len(protein)>33:
orfs.append((current_max, stop[k][0], codons_to_aa(genome[current_max:stop[k][0] - 2]), False))
return orfs
def get_orfs_from_rev_comp(genome):
orfs = []
start = {}
stop = {}
start_codons = {"ATG"}
stop_codons = {"TAA", "TAG", "TGA"}
# get 3 strand and start stop codon index, store the index list into a dictionary
for s in range(0, 3, 1):
codon_list = []
for i in range(s, len(genome), 3):
codon_list.append(genome[i:i + 3])
start_index = [i * 3 + s for i, v in enumerate(codon_list) if v in start_codons]
stop_index = [i * 3 + 2 + s for i, v in enumerate(codon_list) if v in stop_codons]
start[str(s)] = start_index
stop[str(s)] = stop_index
    pre_dict = {}  # store the potential combined start indices (for the circular wrap-around)
for s in range(0, 3, 1):
start_index = start[str(s)]
stop_index = stop[str(s)]
resi = (3 - (len(genome) - s) % 3) % 3
stop_resi = stop[str(resi)] # in case of circle
current_stop_index = 0
index_rev = len(genome)-1
# for the first index, just add to pre_dict if the first start index is smaller than the first stop index
if (start_index[0]<stop_index[0]):
if (str(s) in pre_dict.keys()):
pre_dict[str(s)].append(start_index[0])
else:
pre_dict[str(s)] = [start_index[0]]
# skip check first stop index
for sa in start_index:
# if the start index is bigger than current stop index, we should move current stop index to find next stop index
if sa > stop_index[current_stop_index]:
for i in range(current_stop_index, len(stop_index)):
# if the stop index is greater than start index, then assume we find the orf, put the current index here
if stop_index[i] > sa:
current_stop_index = i
# check whether the translated orf is greater than 33
protein = codons_to_aa(genome[sa:stop_index[i] - 2])
if (len(protein)>33):
orfs.append((index_rev-sa, index_rev-stop_index[i], codons_to_aa(genome[sa:stop_index[i] - 2]), True))
break
circle = [v for v in start_index if v >= stop_index[-1]]
if len(circle)>0:
sa = circle[0]
r = stop_resi[0]
if(str(resi) in pre_dict.keys()):
pre_dict[str(resi)].append(sa)
else:pre_dict[str(resi)]=[sa]
# check the start index to deal with overlap in cyclic genome
for k in pre_dict.keys():
max_len = 0
current_max = 0
pre = pre_dict[k]
length = 0
# find the max length
for i in pre:
if i > stop[k][0]:length = len(genome) - i + stop[k][0]
else:length = stop[k][0] - i
if length > max_len:
current_max = i
max_len = length
if current_max>stop[k][0]:
# which means this start index before the start point
protein = codons_to_aa(genome[current_max:] + genome[:(stop[k][0] - 2)])
if len(protein)>33:orfs.append((index_rev-current_max, index_rev-stop[k][0], codons_to_aa(genome[current_max:]+genome[:(stop[k][0] - 2)]), True))
else:
# which means this start index is after start point
protein = codons_to_aa(genome[current_max:stop[k][0] - 2])
if len(protein)>33:
orfs.append((index_rev-current_max, index_rev-stop[k][0], codons_to_aa(genome[current_max:stop[k][0] - 2]), True))
return orfs
def get_orfs(genome):
    if not set(genome) <= set('ATCG'): raise TypeError  # genome may contain only A, T, C, G
orfs = get_orfs_from_string(genome)
    # get the complementary of the prime strand and reverse it
comp = complementary(genome)
rev_comp = comp[::-1]
orf_rev = get_orfs_from_rev_comp(rev_comp)
for v in orf_rev:
orfs.append(v)
return orfs
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
possibleletterlist=['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', '-']
def getAminoBackground(bg_matrix):
aminoback = np.ones(len(ALPHABET) - 1)
    if bg_matrix is not None:
for letter in ALPHABET:
if letter!='-':
letterindex=AA_TO_INT[letter]
sumlet = (np.sum(bg_matrix, axis=1))[letterindex]
aminoback[letterindex] = sumlet
else:
aminoback=aminoback/(len(ALPHABET)-1)
return aminoback
class MSA:
sequences=[]
primaryseq=""
size=(0,0)
sequenceweights=None
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
try:
lenbeg=len(sequences[0])
for seq in sequences:
if len(seq)!=lenbeg:
raise TypeError("")
else:
for c in seq:
if c not in possibleletterlist:
raise TypeError("")
except:
raise TypeError("")
self.sequences=sequences
self.primaryseq= self.sequences[0].replace('-','')
self.size=(len(self.sequences), len(self.sequences[0]))
sequenceweightsmatrix=self.getSequenceWeightMatrix()
self.sequenceweights=np.sum(sequenceweightsmatrix,axis=0)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
pssm = np.zeros((len(self.sequences[0]), len(ALPHABET)-1))
gapsinprimarysequence=self.getGapsIndicesPrimseq()
backgroundfreq = getAminoBackground(bg_matrix)
for i in range(len(self.sequences[0])):
for letter in ALPHABET:
if letter!='-':
#Count (with weights) observed amino acids and gaps
count = self.getAminoCount(i, letter,use_sequence_weights)
pssm[i, AA_TO_INT[letter]] = count
# Redistribute gaps according to background frequencies
if redistribute_gaps:
numberofgapspercolumnmsa = self.getNumberOfGapsPerColMsa(use_sequence_weights)
pssm=self.redistributeGaps(pssm,numberofgapspercolumnmsa,backgroundfreq)
numberofgapspersequence = self.getNumberOfGapsPerSequence(use_sequence_weights)
#pssm = self.redistributeGaps2(pssm, numberofgapspersequence, backgroundfreq)
#Add weighted pseudocounts
if add_pseudocounts:
pssm=self.applypseudocount(pssm,backgroundfreq,bg_matrix,beta)
'''for j in range(pssm.shape[1]):
pssm[i,j]=(alpha*pssm[i,j]+beta*pseudocountmatrix[i,j])/(alpha+beta)'''
# Normalize to relative frequencies
pssm=self.normalize(pssm)
# Divide by background frequencies
pssm=self.dividebybackground(pssm,backgroundfreq)
# Calculate Log-Score
pssm=self.computelogscore(pssm)
'''sumofrows=np.sum(pssm,axis=1)
for i in range(pssm.shape[0]):
sumrow=sumofrows[i]
for j in range(pssm.shape[1]):
# Normalize to relative frequencies
#Divide by background frequencies
#Calculate Log-Score
value=2*np.log2((pssm[i,j]/sumrow)/backgroundfreq[j])
if value==-np.inf:
pssm[i, j] = -20
else:
pssm[i, j] = value'''
pssm=np.delete(pssm, gapsinprimarysequence, axis=0)
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return self.size
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.primaryseq
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = self.sequenceweights
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        num_obs = 0
        for i in range(len(self.sequences[0])):
            num_obs += self.getNumberDifferentAminoAcidLetterIndex(i)
        num_obs = num_obs / len(self.sequences[0])
        return np.float64(num_obs)
    def getAminoCount(self, pos, letter, weights=False):
        count = 0
        for i in range(len(self.sequences)):
            if self.sequences[i][pos] == letter:
                if weights:
                    count += self.sequenceweights[i]
                else:
                    count += 1
        return count
def getNumberDifferentAminoAcidLetterIndex(self,index):
differentamino=[]
for seq in self.sequences:
if seq[index] not in differentamino:
differentamino.append(seq[index])
return len(differentamino)
def getNumberSequencesSameindexSameAmino(self, index,amino):
numseq=0
for seq in self.sequences:
if seq[index]==amino:
numseq+=1
return numseq
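    # Position-based sequence weighting (Henikoff & Henikoff): in a column
    # with r distinct residues, a sequence whose residue occurs s times there
    # contributes 1 / (r * s); single-residue columns contribute nothing.
    # __init__ sums these per-column contributions into the final weights.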
def getSequenceWeightMatrix(self):
sequenceweightsmatrix = np.zeros(shape=(len(self.sequences[0]), len(self.sequences)))
i = 0
while i < len(self.sequences): # i is the index of the sequence
j = 0
while j < len(self.sequences[0]): # j is the index of the letter
r = self.getNumberDifferentAminoAcidLetterIndex(j)
if r == 1:
sequenceweightsmatrix[j] = np.zeros(len(self.sequences))
else:
amino = self.sequences[i][j]
s = self.getNumberSequencesSameindexSameAmino(j, amino)
partweight = 1 / (s * r)
sequenceweightsmatrix[j][i] = partweight
j += 1
i += 1
return sequenceweightsmatrix
def getGapsIndicesPrimseq(self):
indices=[]
for i in range(len(self.sequences[0])):
if self.sequences[0][i]=='-':
indices.append(i)
return indices
def getNumberOfGapsPerColMsa(self,use_sequence_weights):
numberofgapsperseq = [0] * (len(self.sequences[0]))
for i in range(len(self.sequences[0])):
numgaps=0
for j in range(len(self.sequences)):
if self.sequences[j][i]=='-':
if use_sequence_weights:
numgaps+=self.sequenceweights[j]
else:
numgaps+=1
numberofgapsperseq[i]=numgaps
return numberofgapsperseq
def getNumberOfGapsPerSequence(self,use_sequence_weights):
numberofgapsperseq = [0] * len(self.sequences)
for i in range(len(self.sequences)):
numberofgapsperseq[i]=(self.sequences[i].count('-'))
if use_sequence_weights:
numberofgapsperseq[i] = numberofgapsperseq[i]*self.sequenceweights[i]
return numberofgapsperseq
def redistributeGaps(self, pssm, numberofgapspercolumnmsa, backgroundfreq):
for i in range(pssm.shape[0]):
numberofgaps=numberofgapspercolumnmsa[i]
for j in range(pssm.shape[1]):
aminofreq=backgroundfreq[j]
gapaminovalue = numberofgaps*aminofreq
pssm[i, j] += gapaminovalue
return pssm
def redistributeGaps2(self, pssm, numberofgapspersequence, backgroundfreq):
for i in range(pssm.shape[0]):
numberofgaps=numberofgapspersequence[i]
for j in range(pssm.shape[1]):
aminofreq=backgroundfreq[j]
gapaminovalue = numberofgaps*aminofreq
pssm[i, j] += gapaminovalue
return pssm
def applypseudocount(self, pssm,backgroundfreq,bg_matrix,beta):
pseudocountmatrix = np.zeros((len(self.sequences[0]), len(ALPHABET) - 1))
for i in range(pssm.shape[0]):
for a in ALPHABET:
if a != '-':
pseudocount = 0
for j in ALPHABET:
if j != '-':
freq = pssm[i, AA_TO_INT[j]]
bgfreq = backgroundfreq[AA_TO_INT[j]]
                        if bg_matrix is not None:
subfreq = bg_matrix[AA_TO_INT[a]][AA_TO_INT[j]]
else:
subfreq = 1 / ((len(ALPHABET) - 1) * (len(ALPHABET) - 1))
pseudocount += (freq / bgfreq) * subfreq
pseudocountmatrix[i, AA_TO_INT[a]] = pseudocount
alpha = self.get_number_of_observations() - 1
for i in range(pssm.shape[0]):
pssm[i] = (alpha * pssm[i] + beta * pseudocountmatrix[i]) / (alpha + beta)
return pssm
def normalize(self,pssm):
sumofrows = np.sum(pssm, axis=1)
for i in range(pssm.shape[0]):
pssm[i]=pssm[i]/sumofrows[i]
return pssm
def dividebybackground(self, pssm, backgroundfreq):
for j in range(pssm.shape[1]):
pssm[:,j]=pssm[:,j]/backgroundfreq[j]
return pssm
def computelogscore(self, pssm):
for i in range(pssm.shape[0]):
for j in range(pssm.shape[1]):
value = 2 * np.log2(pssm[i, j])
if value == -np.inf:
pssm[i, j] = -20
else:
pssm[i, j] = value
return pssm
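# Minimal usage sketch (hypothetical toy alignment; assumes the module-level
# helpers referenced above, e.g. getAminoBackground, are defined earlier in
# this file):
if __name__ == '__main__':
    toy_msa = MSA(['SE-AN', 'SE-ES', 'SEVEN', 'SE-AS'])
    print(toy_msa.get_size())              # (4, 5)
    print(toy_msa.get_primary_sequence())  # 'SEAN'
    print(toy_msa.get_pssm(redistribute_gaps=True))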
<file_sep>##############
# Exercise 2.6
##############
numberOfSequences = 0
totalLength = 0
avgLength = 0
fastaData = ''
aminoSeq = ""
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        self.read_fasta(filepath)
def get_counts(self):
global numberOfSequences
return numberOfSequences
    def get_average_length(self):
        global totalLength
        global aminoSeq
        aminoSeq = ""
        for line in fastaData.splitlines():
            if not line.startswith('>'):
                aminoSeq += line
        totalLength = len(aminoSeq) - aminoSeq.count('*')
        return totalLength / numberOfSequences
    def read_fasta(self, path):
        global fastaData
        global numberOfSequences
        with open(path, 'r') as myfile:
            fastaData = myfile.read()
        # iterate over lines, not characters, when counting headers
        for line in fastaData.splitlines():
            if line.startswith(">"):
                numberOfSequences += 1
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
amino_abs_freq = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0
}
for a in aminoSeq:
if a != '*':
amino_abs_freq[a] += 1
return amino_abs_freq
def get_av_frequencies(self):
        # return number of occurrences normalized by length
amino_av_freq = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0
}
        amino_abs_freq = self.get_abs_frequencies()
        for a in amino_abs_freq:
            amino_av_freq[a] = amino_abs_freq[a] / totalLength
return amino_av_freq
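# Usage sketch (hypothetical FASTA path; note that get_average_length() must
# run before the frequency methods, since it populates the global aminoSeq):
if __name__ == '__main__':
    dist = AADist('tests/tests.fasta')
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_abs_frequencies())
    print(dist.get_av_frequencies())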
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
ALPHABET1 = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT_1 = {aa:index for index, aa in enumerate(ALPHABET1)}
INT_TO_AA_1 = {index:aa for index, aa in enumerate(ALPHABET1)}
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.seqList = sequences
if not(self.checkSeqLen() and self.checkseq() and self.checkchar()):
raise TypeError()
def checkchar(self):
for sequence in self.seqList:
for character in sequence:
if character not in ALPHABET:
return False
return True
def checkSeqLen(self):
if len(self.seqList) > 0:
return True
else:
return False
def checkseq(self):
if len(self.seqList[0])>0:
for seq in self.seqList:
if len(seq) != len(self.seqList[0]):
return False
else:
return False
return True
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
        Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
Primarylist = self.PrimaryIndList()
pssm = np.zeros(shape=(len(Primarylist),21))
        if not use_sequence_weights:
for sequence in self.seqList:
for row in range(0,len(Primarylist)):
                    pssm[row][AA_TO_INT[sequence[Primarylist[row]]]] += 1
else:
seq_wts = self.get_sequence_weights()
index = 0
for sequence in self.seqList:
for row in range(0, len(Primarylist)):
pssm[row][AA_TO_INT[sequence[Primarylist[row]]]] += seq_wts[index]
                index = index + 1
        if redistribute_gaps:
for row in range(0, len(Primarylist)):
gap = pssm[row][20]
if gap > 0:
for col in range(0, 20):
                        if bg_matrix is None:
chk=0.05
else:
bg_array1 = np.sum(bg_matrix,axis=0)
bg_array = np.zeros(20)
for index in range(0,20):
bg_array[index] = bg_array1[AA_TO_INT_1[INT_TO_AA[index]]]
chk=bg_array[col]
pssm[row][col] =pssm[row][col]+ gap*chk
pssm = pssm[:, :-1]
        if add_pseudocounts:
alpha = self.get_number_of_observations() - 1
            if bg_matrix is None:
pseudo_g = self.mat_pseudo(pssm,None,bg_matrix,False)
else:
bg_array1 = np.sum(bg_matrix,axis=0)
bg_array = np.zeros(20)
for index in range(0,20):
bg_array[index] = bg_array1[AA_TO_INT_1[INT_TO_AA[index]]]
pseudo_g = self.mat_pseudo(pssm,bg_array,bg_matrix,True)
pssm_new = np.zeros(shape=(pseudo_g.shape[0],pseudo_g.shape[1]))
for row in range(0,pssm_new.shape[0]):
pssm_new[row] = (alpha*pssm[row] + beta*pseudo_g[row])/(alpha+beta)
pssm = pssm_new
        row_sums = np.sum(pssm, axis=1)
        pssm = pssm / row_sums[:, None]
        if bg_matrix is None:
pssm=pssm/0.05
else:
bg_array1 = np.sum(bg_matrix,axis=0)
bg_array = np.zeros(20)
for index in range(0,20):
bg_array[index] = bg_array1[AA_TO_INT_1[INT_TO_AA[index]]]
for row in range(0, len(Primarylist)):
for col in range(0, 20):
pssm[row][col] = pssm[row][col] / bg_array[col]
pssm = 2 * np.log2(pssm)
        pssm[pssm == -np.inf] = -20
pssm=np.rint(pssm).astype(np.int64)
return pssm
def mat_pseudo(self,pssm,bg_array,bg_matrix,flag):
pseudo_g = np.zeros(shape=(pssm.shape[0],20))
for row in range(0,pseudo_g.shape[0]):
for col in range(0,pseudo_g.shape[1]):
                total = 0
                for pssm_col in range(0, 20):
                    char1 = INT_TO_AA[col]
                    char2 = INT_TO_AA[pssm_col]
                    if flag:
                        chk1 = bg_matrix[AA_TO_INT_1[char1]][AA_TO_INT_1[char2]]
                        chk2 = bg_array[pssm_col]
                    else:
                        chk1 = 1 / 400
                        chk2 = 0.05
                    total += (pssm[row][pssm_col] * chk1) / chk2
                pseudo_g[row][col] = total
return pseudo_g
def PrimaryIndList(self):
primary_seq = self.seqList[0]
indexList = []
for index,char in enumerate(primary_seq):
            if char != '-':
indexList.append(index)
return indexList
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
number = len(self.seqList)
l = len(self.seqList[0])
return (number, l)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
nogap = self.seqList[0].replace("-","")
return nogap
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
pssm = np.zeros(shape=(len(self.seqList[0]), 21))
for seq in self.seqList:
for rowindex in range(0, len(self.seqList[0])):
pssm[rowindex][AA_TO_INT[seq[rowindex]]] += 1
pssm = np.rint(pssm).astype(np.int64)
r = np.count_nonzero(pssm,axis=1)
length = len(np.nonzero(pssm)[0])
no_zero = np.nonzero(pssm)
no_zero_Map = {}
for position in range(0,length):
if no_zero[0][position] not in no_zero_Map:
newMap = {INT_TO_AA[no_zero[1][position]]:pssm[no_zero[0][position]][no_zero[1][position]]}
no_zero_Map[no_zero[0][position]] = newMap
else:
position_Map = no_zero_Map[no_zero[0][position]]
position_Map[INT_TO_AA[no_zero[1][position]]] = pssm[no_zero[0][position]][no_zero[1][position]]
wt_matrix = np.zeros(shape=(len(self.seqList[0]),len(self.seqList)))
for row in range(0,len(self.seqList[0])):
for col in range(0,len(self.seqList)):
if r[row] != 1:
charpos = self.seqList[col][row]
wt_matrix[row][col] = 1/(r[row]*no_zero_Map[row][charpos])
summation = np.sum(wt_matrix,axis=0)
return summation.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
pssm = np.zeros(shape=(len(self.seqList[0]), 21))
for seq in self.seqList:
for rowindex in range(0,len(self.seqList[0])):
pssm[rowindex][AA_TO_INT[seq[rowindex]]] +=1
pssm = np.rint(pssm).astype(np.int64)
r = np.count_nonzero(pssm, axis=1)
observation = np.sum(r)/len(self.seqList[0])
return np.float64(observation)
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def complementary(str1):
input1 = "ATGC"
output = "TACG"
trantab = str.maketrans(input1, output)
return str1.translate(trantab)
def get_orfs(genome):
valid_DNA="ACTG"
orf=[]
start=0
stop=0
complement=complementary(genome)
flag= False
try:
for i in genome:
if i not in valid_DNA:
                raise TypeError("Invalid DNA")
except TypeError:
print( 'An exception flew by!')
raise
for i in range(len(list(genome))):
if genome[i:i+3]=="ATG":
start=i
elif (genome[i:i+3]=="TAA" or genome[i:i+3]=="TAG" or genome[i:i+3]== "TGA"):
stop=i+1
            if len(genome[start:stop]) > 99 and len(genome[start:stop]) % 3 == 0:
                orf.append((start, stop, codons_to_aa(genome[start:stop]), flag))
    if genome[0:3] == genome[-3:]:
for i in range(len(list(genome)),0):
if genome[i:i+3]=="ATG":
start=i
elif (genome[i:i+3]=="TAA" or genome[i:i+3]=="TAG" or genome[i:i+3]== "TGA"):
stop=i+1
                if len(genome[start:stop]) > 99 and len(genome[start:stop]) % 3 == 0:
                    orf.append((start, stop, codons_to_aa(genome[start:stop]), flag))
for i in range(0,len(list(genome))):
if genome[i:i+3]=="ATG":
start=i
elif (genome[i:i+3]=="TAA" or genome[i:i+3]=="TAG" or genome[i:i+3]== "TGA"):
stop=i+1
            if len(genome[start:stop]) > 99 and len(genome[start:stop]) % 3 == 0:
                orf.append((start, stop, codons_to_aa(genome[start:stop]), flag))
for i in range(len(list(complement))):
if complement[i:i+3]=="GTA":
start=i
flag=True
elif (complement[i:i+3]=="AAT" or complement[i:i+3]=="GAT" or complement[i:i+3]== "AGT"):
stop=i+1
            if len(complement[start:stop]) > 99 and len(complement[start:stop]) % 3 == 0:
                orf.append((stop, start, codons_to_aarev(complement[start:stop]), flag))
    if complement[0:3] == complement[-3:]:
for i in range(len(list(complement)),0):
if complement[i:i+3]=="GTA":
start=i
flag=True
elif (complement[i:i+3]=="AAT" or complement[i:i+3]=="GAT" or complement[i:i+3]== "AGT"):
stop=i+1
                if len(complement[start:stop]) > 99 and len(complement[start:stop]) % 3 == 0:
                    orf.append((stop, start, codons_to_aarev(complement[start:stop]), flag))
for i in range(0,len(list(complement))):
if complement[i:i+3]=="GTA":
start=i
flag=True
elif (complement[i:i+3]=="AAT" or complement[i:i+3]=="GAT" or complement[i:i+3]== "AGT"):
stop=i+1
            if len(complement[start:stop]) > 99 and len(complement[start:stop]) % 3 == 0:
                orf.append((stop, start, codons_to_aarev(complement[start:stop]), flag))
return orf
def codons_to_aa(orf):
codon_dict = {"TTT":"F", "TTC":"F", "TTA":"L", "TTG":"L",
"TCT":"S", "TCC":"S", "TCA":"S", "TCG":"S",
"TAT":"Y", "TAC":"Y", "TAA":"STOP", "TAG":"STOP",
"TGT":"C", "TGC":"C", "TGA":"STOP", "TGG":"W",
"CTT":"L", "CTC":"L", "CTA":"L", "CTG":"L",
"CCT":"P", "CCC":"P", "CCA":"P", "CCG":"P",
"CAT":"H", "CAC":"H", "CAA":"Q", "CAG":"Q",
"CGT":"R", "CGC":"R", "CGA":"R", "CGG":"R",
"ATT":"I", "ATC":"I", "ATA":"I", "ATG":"M",
"ACT":"T", "ACC":"T", "ACA":"T", "ACG":"T",
"AAT":"N", "AAC":"N", "AAA":"K", "AAG":"K",
"AGT":"S", "AGC":"S", "AGA":"R", "AGG":"R",
"GTT":"V", "GTC":"V", "GTA":"V", "GTG":"V",
"GCT":"A", "GCC":"A", "GCA":"A", "GCG":"A",
"GAT":"D", "GAC":"D", "GAA":"E", "GAG":"E",
"GGT":"G", "GGC":"G", "GGA":"G", "GGG":"G"}
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seq = str("".join(codon_dict[c] for c in codons))
return aa_seq
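# e.g. codons_to_aa("ATGTTTTAA") == "MFSTOP" (the dict maps stop codons to
# "STOP"), and codons_to_aa("ATGA") is None since 4 is not a multiple of 3.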
def codons_to_aarev(orf):
codon_dict = {"TTT":"F", "CTT":"F", "ATT":"L", "GTT":"L",
"TCT":"S", "CCT":"S", "ACT":"S", "GCT":"S",
"TAT":"Y", "CAT":"Y", "AAT":"STOP", "GAT":"STOP",
"TGT":"C", "CGT":"C", "AGT":"STOP", "GGT":"W",
"TTC":"L", "CTC":"L", "ATC":"L", "GTC":"L",
"TCC":"P", "CCC":"P", "ACC":"P", "GCC":"P",
"TAC":"H", "CAC":"H", "AAC":"Q", "GAC":"Q",
"TGC":"R", "CGC":"R", "AGC":"R", "GGC":"R",
"TTA":"I", "CTA":"I", "ATA":"I", "GTA":"M",
"TCA":"T", "CCA":"T", "ACA":"T", "GCA":"T",
"TAA":"N", "CAA":"N", "AAA":"K", "GAA":"K",
"TGA":"S", "CGA":"S", "AGA":"R", "GGA":"R",
"TTG":"V", "CTG":"V", "ATG":"V", "GTG":"V",
"TCG":"A", "CCG":"A", "ACG":"A", "GCG":"A",
"TAG":"D", "CAG":"D", "AAG":"E", "GAG":"E",
"TGG":"G", "CGG":"G", "AGG":"G", "GGG":"G"}
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seqrev = str("".join(codon_dict[c] for c in codons))
return aa_seqrev
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
# Copy list of sequences
self.msa_seqs = list(sequences)
# Number of sequences in MSA
self.num_cols = len(self.msa_seqs)
# Length of MSA
self.num_rows = 0 if self.num_cols == 0 else len(self.msa_seqs[0])
# Check for correctness
if self.num_rows == 0 or not self.check_sequences():
raise TypeError('Invalid MSA.')
# MSA as matrix: each row is a MSA column, each column a sequence
self.msa_mat = self.make_msa_matrix()
# Absolute counts of amino acids per MSA column
self.cnt_mat = self.make_count_matrix()
# Number of different amino acids per MSA column
self.var_arr = self.calculate_variation()
# Sequence weights
self.wgt_arr = self.calculate_weights()
# Estimate for number of observations
self.num_obs = np.mean(self.var_arr)
# Remove columns with gaps in primary sequence
self.cnt_mat = self.cnt_mat[self.msa_mat[:, 0] != GAP_INDEX]
self.msa_mat = self.msa_mat[self.msa_mat[:, 0] != GAP_INDEX]
def check_sequences(self):
length = self.num_rows
aa_set = set(ALPHABET)
for seq in self.msa_seqs:
if len(seq) != length:
return False
for c in seq:
if c not in aa_set:
return False
return True
def make_msa_matrix(self):
msa_matrix = np.zeros((self.num_rows, self.num_cols), dtype=np.int64)
for col, seq in enumerate(self.msa_seqs):
for row, aa in enumerate(seq):
msa_matrix[row][col] = AA_TO_INT[aa]
return msa_matrix
def make_count_matrix(self):
count_matrix = np.apply_along_axis(np.bincount,
axis=1,
arr=self.msa_mat,
minlength=21)
return count_matrix
def make_weighted_count_matrix(self):
count_matrix = np.apply_along_axis(np.bincount,
axis=1,
arr=self.msa_mat,
weights=self.wgt_arr,
minlength=21)
return count_matrix
def calculate_variation(self):
var_array = np.count_nonzero(self.cnt_mat, axis=1)
return var_array
def calculate_weights(self):
idx = np.arange(self.msa_mat.shape[0])
weight_matrix = self.cnt_mat[idx, self.msa_mat.T]
weight_matrix = np.einsum('ij,j->ji', weight_matrix, self.var_arr)
weight_matrix = 1.0 / weight_matrix
weight_matrix = weight_matrix[self.var_arr > 1]
if weight_matrix.size == 0:
return np.ones(self.num_cols)
else:
return np.einsum('ij->j', weight_matrix)
def redistribute_gaps(self, count_matrix, bg_array):
gap_matrix = np.einsum('i,j->ij', count_matrix[:, GAP_INDEX], bg_array)
count_matrix = np.delete(count_matrix, GAP_INDEX, axis=1) + gap_matrix
return count_matrix
def make_pssm(self, count_matrix, bg_array):
row_sums = np.einsum('ij->i', count_matrix)
basic_profile = count_matrix / row_sums[:, np.newaxis]
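        # np.ma.log2 masks zero frequencies; filling the mask with -10 before
        # the factor of 2 turns would-be -inf scores into the required -20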
basic_profile = 2 * np.ma.log2(basic_profile / bg_array).filled(-10)
return np.rint(basic_profile).astype(np.int64)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if bg_matrix is None:
bg_matrix = np.full((20, 20), 0.0025)
bg_array = np.einsum('ij->i', bg_matrix)
if use_sequence_weights:
count_matrix = self.make_weighted_count_matrix()
else:
count_matrix = self.cnt_mat
if redistribute_gaps:
count_matrix = self.redistribute_gaps(count_matrix, bg_array)
else:
count_matrix = np.delete(count_matrix, GAP_INDEX, axis=1)
if add_pseudocounts:
alpha = self.num_obs - 1
pseudocounts = count_matrix / bg_array
pseudocounts = np.einsum('ji,ik->jk', pseudocounts, bg_matrix)
count_matrix = alpha*count_matrix + beta*pseudocounts
count_matrix = count_matrix / (alpha+beta)
return self.make_pssm(count_matrix, bg_array)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.num_cols, self.num_rows)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.msa_seqs[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.wgt_arr.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.num_obs.astype(np.float64)
<file_sep>import numpy as np
from util import subtract
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.rows = len(string2) + 1
self.cols = len(string1) + 1
self.gap_penalty = gap_penalty
self.subs = matrix
        self.scores = np.zeros((self.rows, self.cols), dtype=np.int64)
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for i, j in np.ndindex(self.scores.shape):
if i != 0 and j != 0:
letter1 = self.string1[j-1]
letter2 = self.string2[i-1]
score_cands = [
self.scores[i-1, j-1] + self.subs[letter2][letter1],
self.scores[i-1, j] + self.gap_penalty,
self.scores[i, j-1] + self.gap_penalty,
0
]
self.scores[i, j] = max(score_cands)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.get_alignment() != ("", "")
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
# start from point with maximum value
result = ("", "", np.unravel_index(self.scores.argmax(), self.scores.shape))
self.alignment_points = []
# add candidates according to scores matrix
while True:
aligned1, aligned2, point = result
self.alignment_points.append(point)
if self.scores[point] == 0:
return result[0:2]
else:
letter1 = self.string1[point[1] - 1]
letter2 = self.string2[point[0] - 1]
if self.scores[point] - self.scores[subtract(point, (1, 1))] == self.subs[letter1][letter2]:
result = (letter1 + aligned1, letter2 + aligned2, subtract(point, (1, 1)))
elif self.scores[point] - self.scores[subtract(point, (1, 0))] == self.gap_penalty:
result = ("-" + aligned1, letter2 + aligned2, subtract(point, (1, 0)))
elif self.scores[point] - self.scores[subtract(point, (0, 1))] == self.gap_penalty:
result = (letter1 + aligned1, "-" + aligned2, subtract(point, (0, 1)))
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
# get_alignment populates `self.alignment_points`
self.get_alignment()
string_number = 0 if string_number == 2 else 1
return any(residue_index == x[string_number] for x in self.alignment_points)
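# Minimal usage sketch (toy substitution matrix, not a real BLOSUM; assumes
# util.subtract does element-wise tuple subtraction, as its use above implies):
if __name__ == '__main__':
    toy = {a: {b: (3 if a == b else -1) for b in 'ARND'} for a in 'ARND'}
    la = LocalAlignment('ARND', 'ARN', -2, toy)
    print(la.has_alignment())  # True for this toy input
    print(la.get_alignment())  # ('ARN', 'ARN')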
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome):
    if len(genome) % 3 != 0:
        raise TypeError("Is not a DNA sequence.")
<file_sep>##############
# Exercise 2.6
##############
from genetic_code import paa_dist, aa_dist_nor
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
static_counts: int = 0
file = ""
def __init__(self, filepath):
self.read_fasta(filepath)
def get_counts(self):
return AADist.static_counts
def get_average_length(self):
return len(AADist.file)/AADist.static_counts
def read_fasta(self,path):
AADist.static_counts = 0
AADist.file =""
with open(path,"r") as f:
for line in f:
if line.startswith(">"):
AADist.static_counts +=1
elif line.startswith(";"):
continue
else:
AADist.file += line
AADist.file = AADist.file.replace("\n","")
AADist.file = AADist.file.replace("*", "")
def get_abs_frequencies(self):
return paa_dist(AADist.file)
def get_av_frequencies(self):
return aa_dist_nor(AADist.file)
<file_sep>import numpy as np
import itertools
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.all_words = []
self.different_words_num_for_each_sequence = []
self.num_sequences_containing_each_word = {}
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
current_words = []
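        # collect every overlapping 3-mer, e.g. 'MGPRA' -> 'MGP', 'GPR', 'PRA'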
for i in range(0, len(sequence) - 2):
current_words.append(sequence[i:i + 3])
current_words = list(set(current_words))
for word in current_words:
if word in self.num_sequences_containing_each_word:
self.num_sequences_containing_each_word[word] = self.num_sequences_containing_each_word[word] + 1
else:
self.num_sequences_containing_each_word[word] = 1
self.different_words_num_for_each_sequence.append(len(current_words))
self.all_words.extend(current_words)
self.all_words = list(set(self.all_words))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
        # use a local list so repeated calls do not accumulate old results
        matches = []
        for sequence in self.sequences:
            if word in sequence:
                matches.append(sequence)
        return matches
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
num_of_sequences = len(self.sequences)
num_of_different_words = len(self.all_words)
avg_num_words_per_sequence = round(np.mean(self.different_words_num_for_each_sequence))
        total = sum(self.num_sequences_containing_each_word.values())
        avg_num_sequences_per_word = round(total / len(self.num_sequences_containing_each_word))
        return (num_of_sequences, num_of_different_words,
                int(avg_num_words_per_sequence), int(avg_num_sequences_per_word))
# (7104, 1646, 485.0, 431)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def score(self, word, keyword):
sum1 = self.substitution_matrix[AA_TO_INT[word[0]]][AA_TO_INT[keyword[0]]]
sum2 = self.substitution_matrix[AA_TO_INT[word[1]]][AA_TO_INT[keyword[1]]]
sum3 = self.substitution_matrix[AA_TO_INT[word[2]]][AA_TO_INT[keyword[2]]]
return sum1 + sum2 + sum3
    def score_for_pssm(self, word, keyword):
        total = 0
        for i in range(3):
            position = AA_TO_INT[keyword[i]]
            total = total + word[i][position]
        return total
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
good_words = []
if(sequence):
current_words = []
for i in range(0, len(sequence) - 2):
current_words.append(sequence[i:i + 3])
current_words = list(set(current_words))
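            # itertools.product enumerates all 20**3 = 8000 possible 3-letter
            # words over ALPHABET; each is then scored against the query words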
keywords = [''.join(i) for i in itertools.product(ALPHABET, repeat = 3)]
for word in current_words:
to_remove = []
for keyword in keywords:
if self.score(word,keyword) >= T:
good_words.append(keyword)
#do not double check
to_remove.append(keyword)
for i in to_remove:
keywords.remove(i)
else:
current_words = []
for i in range(0, len(pssm) - 2):
current_words.append(pssm[i:i + 3])
keywords = [''.join(i) for i in itertools.product(ALPHABET, repeat=3)]
for word in current_words:
to_remove = []
for keyword in keywords:
if self.score_for_pssm(word, keyword) >= T:
good_words.append(keyword)
# do not double check
to_remove.append(keyword)
for i in to_remove:
keywords.remove(i)
return good_words
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder HSP from the template
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder HSP from the template
        return d
<file_sep>import numpy as np
import itertools
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences=[]
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
words=[]
for seq in self.sequences:
if(word in seq):
words.append(seq)
return words
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        sequence = 0
        words = 0
        wordlist = []
        flatten = 0
        wordcount = {}
        for seq in self.sequences:
            sequence = sequence + 1
            newlist = []
            for i in range(len(seq) - 3 + 1):
                wordlist.append(seq[i:i + 3])
                newlist.append(seq[i:i + 3])
            newlist = list(set(newlist))
            wordlist = list(set(wordlist))
            flatten += len(newlist)
            for word in newlist:
                if word not in wordcount:
                    wordcount[word] = 1
                else:
                    wordcount[word] += 1
        words = len(wordlist)
        wordsperseq = flatten / sequence
        seqperword = sum(wordcount.values()) / words
        return (sequence, words, round(wordsperseq), round(seqperword))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix=substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
seeds = []
data = list(ALPHABET)
triplets = list(itertools.product(data, repeat=3))
final_triplets = []
for triplet in triplets:
final_triplets.append(''.join(triplet))
        if pssm is not None:
rows = pssm.shape[0]
for triplet in final_triplets:
for row in range(0, rows - 2):
score = 0
r1 = row
r2 = row + 1
r3 = row + 2
c1 = AA_TO_INT[triplet[0]]
c2 = AA_TO_INT[triplet[1]]
c3 = AA_TO_INT[triplet[2]]
score = pssm[r1][c1] + pssm[r2][c2] + pssm[r3][c3]
if score>=T:
seeds.append(triplet)
return np.unique(seeds)
        if sequence is not None:
words=[]
for i in range(len(sequence)-3+1):
words.append(sequence[i:i+3])
words=list(set(words))
seed_word=[]
for w in words:
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
newword=i+j+k
score=self.substitution_matrix[AA_TO_INT[w[0]],AA_TO_INT[newword[0]]]+self.substitution_matrix[AA_TO_INT[w[1]],AA_TO_INT[newword[1]]]+self.substitution_matrix[AA_TO_INT[w[2]],AA_TO_INT[newword[2]]]
if(score>=T):
seed_word.append(newword)
seed_word=list(set(seed_word))
return seed_word
def get_pos_word(self, *, sequence=None, pssm=None, T=11):
"""
        Return all (word, position) pairs with score >= T for the given
        protein sequence or PSSM, where position is the query index the word
        was scored against. Only a sequence or PSSM will be provided, not
        both at the same time.
        :param sequence: a protein sequence (string).
        :param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
        :param T: score threshold T for the words.
        :return: Tuple of two lists: the words and their query positions.
"""
seeds = []
data = list(ALPHABET)
triplets = list(itertools.product(data, repeat=3))
final_triplets = []
for triplet in triplets:
final_triplets.append(''.join(triplet))
        if sequence is not None:
seed_word=[]
position=[]
for dy in range(len(sequence)-3+1):
for i in ALPHABET:
for j in ALPHABET:
for k in ALPHABET:
w=sequence[dy:dy+3]
newword=i+j+k
score=self.substitution_matrix[AA_TO_INT[w[0]],AA_TO_INT[newword[0]]]+self.substitution_matrix[AA_TO_INT[w[1]],AA_TO_INT[newword[1]]]+self.substitution_matrix[AA_TO_INT[w[2]],AA_TO_INT[newword[2]]]
if(score>=T):
seed_word.append(newword)
position.append(dy)
return (seed_word,position)
        if pssm is not None:
pos=[]
rows = pssm.shape[0]
for triplet in final_triplets:
for row in range(0, rows - 2):
score = 0
r1 = row
r2 = row + 1
r3 = row + 2
c1 = AA_TO_INT[triplet[0]]
c2 = AA_TO_INT[triplet[1]]
c3 = AA_TO_INT[triplet[2]]
score = pssm[r1][c1] + pssm[r2][c2] + pssm[r3][c3]
if score>=T:
seeds.append(triplet)
pos.append(r1)
return (seeds,pos)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        if query is not None:
words,pos=self.get_pos_word(sequence=query,pssm=None,T=T)
for i,word in enumerate(words):
p=pos[i]
squenc=blast_db.get_sequences(word)
for seq in squenc:
positions=[]
for i in range(len(seq)-2):
if(seq[i:i+3]==word):
positions.append(i)
for x in positions:
maxone=self.substitution_matrix[AA_TO_INT[word[0]],AA_TO_INT[query[p]]] +self.substitution_matrix[AA_TO_INT[word[1]],AA_TO_INT[query[p+1]]]+self.substitution_matrix[AA_TO_INT[word[2]],AA_TO_INT[query[p+2]]]
right_query=p+2
hsp_right_seq=x+2
left_one_hsp=p
left_seq_hsp=x
### in right
if(len(seq)>x+3 and len(query)>p+3):
ext=seq[x+3:]
s=maxone
countt=p+3
counts=0
while(True):
chg=self.substitution_matrix[AA_TO_INT[ext[counts]],AA_TO_INT[query[countt]]]
s+=chg
if(s>maxone):
maxone=s
right_query=countt
hsp_right_seq=x+3+counts
counts+=1
countt+=1
if(maxone-s>=X or len(ext)==counts or len(query)==countt):
break
### in left
if(x>0 and p>0):
ext=seq[0:x]
s=maxone
countt=p-1
counts=-1
while(True):
chg=self.substitution_matrix[AA_TO_INT[ext[counts]],AA_TO_INT[query[countt]]]
s+=chg
if(s>maxone):
maxone=s
left_one_hsp=countt
left_seq_hsp=x+counts
counts-=1
countt-=1
if(maxone-s>=X or -1*counts>len(ext) or countt<0):
break
if(maxone>=S):
tup=(left_one_hsp,left_seq_hsp,hsp_right_seq-left_seq_hsp+1,maxone)
if(seq not in d):
d[seq]=[tup]
else:
if(tup not in d[seq]):
d[seq].append(tup)
else:
words,pos=self.get_pos_word(sequence=None,pssm=pssm,T=T)
for i,word in enumerate(words):
p=pos[i]
squenc=blast_db.get_sequences(word)
for seq in squenc:
positions=[]
for i in range(len(seq)-2):
if(seq[i:i+3]==word):
positions.append(i)
for x in positions:
maxone=pssm[p,AA_TO_INT[word[0]]] +pssm[p+1,AA_TO_INT[word[1]]]+pssm[p+2,AA_TO_INT[word[2]]]
right_query=p+2
hsp_right_seq=x+2
left_one_hsp=p
left_seq_hsp=x
### in right
if(len(seq)>x+3 and pssm.shape[0]>p+3):
ext=seq[x+3:]
s=maxone
countt=p+3
counts=0
while(True):
chg=pssm[countt,AA_TO_INT[ext[counts]]]
s+=chg
if(s>maxone):
maxone=s
right_query=countt
hsp_right_seq=x+3+counts
counts+=1
countt+=1
if(maxone-s>=X or len(ext)==counts or pssm.shape[0]==countt):
break
### in left
if(x>0 and p>0):
ext=seq[0:x]
s=maxone
countt=p-1
counts=-1
while(True):
chg=pssm[countt,AA_TO_INT[ext[counts]]]
s+=chg
if(s>maxone):
maxone=s
left_one_hsp=countt
left_seq_hsp=x+counts
counts-=1
countt-=1
if(maxone-s>=X or -1*counts>len(ext) or countt<0):
break
if(maxone>=S):
tup=(left_one_hsp,left_seq_hsp,hsp_right_seq-left_seq_hsp+1,maxone)
if(seq not in d):
d[seq]=[tup]
else:
if(tup not in d[seq]):
d[seq].append(tup)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]  # placeholder HSP from the template
        return d
<file_sep>##############
# Exercise 1.5
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
total_sequence_len = 0
for sequence in self.__sequences:
total_sequence_len += len(sequence)
return total_sequence_len / len(self.__sequences)
def read_fasta(self, path):
with open(path, 'r') as f:
lines = f.readlines()
sequence = ''
for l in lines:
l = l.strip()
if not l:
self.__sequences.append(sequence)
sequence = ''
continue
if l.startswith('>'):
continue
l = l.strip('*')
sequence += l
self.__sequences.append(sequence)
def get_abs_frequencies(self):
        # return number of occurrences not normalized by length
aa_to_freq = Counter()
for sequence in self.__sequences:
aa_to_freq += Counter(sequence)
return dict(aa_to_freq)
def get_av_frequencies(self):
        # return number of occurrences normalized by length
abs_frequencies = self.get_abs_frequencies()
total_sequence_len = sum(abs_frequencies.values())
return {k: v/total_sequence_len for k, v in abs_frequencies.items()}
<file_sep>import numpy as np
from copy import deepcopy
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int64)
self.alignments = []
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for row in range(len(self.string2) + 1):
self.score_matrix[row][0] = self.gap_penalty * row
for col in range(len(self.string1) + 1):
self.score_matrix[0][col] = self.gap_penalty * col
for row, y in enumerate(self.string2):
for col, x in enumerate(self.string1):
diag = self.score_matrix[row][col] + self.substitution_matrix[x][y] # diagonal jump
hori = self.score_matrix[row + 1][col] + self.gap_penalty # horizontal jump
verti = self.score_matrix[row][col + 1] + self.gap_penalty # vertical jump
maximum = max(diag, hori, verti)
self.score_matrix[row + 1][col + 1] = maximum
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
        rows = self.score_matrix.shape[0]
        cols = self.score_matrix.shape[1]
        return self.score_matrix[rows - 1][cols - 1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
self.get_alignments()
return len(self.alignments)
def rec_back_step(self, row, col, result):
if row < 0 or col < 0:
return
x = self.string1[row - 1]
y = self.string2[col - 1]
current = self.score_matrix[col][row]
di = self.score_matrix[col - 1][row - 1]
ho = self.score_matrix[col][row - 1]
ve = self.score_matrix[col - 1][row]
current_result = deepcopy(result)
if row == 0 and col == 0:
current_result[0] = current_result[0][::-1]
current_result[1] = current_result[1][::-1]
tuple_result = tuple(current_result)
current_result[0] = current_result[0][:-1]
current_result[1] = current_result[1][:-1]
            if tuple_result not in self.alignments:
                self.alignments.append(tuple_result)
        if current - self.substitution_matrix[x][y] == di:  # diagonal step
            current_result[0] += x
            current_result[1] += y
            self.rec_back_step(row - 1, col - 1, current_result)
            current_result[0] = current_result[0][:-1]
            current_result[1] = current_result[1][:-1]
        if current - self.gap_penalty == ho:  # horizontal step (gap in string2)
            current_result[0] += x
            current_result[1] += '-'
            self.rec_back_step(row - 1, col, current_result)
            current_result[0] = current_result[0][:-1]
            current_result[1] = current_result[1][:-1]
        if current - self.gap_penalty == ve:  # vertical step (gap in string1)
            current_result[0] += '-'
            current_result[1] += y
            self.rec_back_step(row, col - 1, current_result)
            current_result[0] = current_result[0][:-1]
            current_result[1] = current_result[1][:-1]
return
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
col = self.score_matrix.shape[0] - 1
row = self.score_matrix.shape[1] - 1
self.rec_back_step(row, col, ['', ''])
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
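# Minimal usage sketch (toy substitution matrix and hypothetical inputs):
if __name__ == '__main__':
    toy = {a: {b: (1 if a == b else -1) for b in 'AVD'} for a in 'AVD'}
    ga = GlobalAlignment('AVD', 'AD', -2, toy)
    print(ga.get_best_score())   # 0 for this toy input
    print(ga.get_alignments())   # e.g. [('AVD', 'A-D')]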
<file_sep>def get_orfs(genome):
code_dict = {
'TTA': 'L',
'TTG': 'L',
'CTT': 'L',
'CTC': 'L',
'CTA': 'L',
'CTG': 'L',
'AAA': 'K',
'AAG': 'K',
'TTT': 'F',
'TTC': 'F',
'CCT': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'TCT': 'S',
'TCC': 'S',
'TCA': 'S',
'TCG': 'S',
'AGT': 'S',
'AGC': 'S',
'ACT': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'TGG': 'W',
'TAT': 'Y',
'TAC': 'Y',
'GTT': 'V',
'GTC': 'V',
'GTA': 'V',
'GTG': 'V',
'GCT': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'CGT': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'AGA': 'R',
'AGG': 'R',
'AAT': 'N',
'AAC': 'N',
'GAT': 'D',
'GAC': 'D',
'TGT': 'C',
'TGC': 'C',
'CAA': 'Q',
'CAG': 'Q',
'GAA': 'E',
'GAG': 'E',
'GGT': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G',
'CAT': 'H',
'CAC': 'H',
'ATT': 'I',
'ATC': 'I',
'ATA': 'I',
'TAG': '',
'TGA': '',
'TAA': '',
'ATG': 'M'
}
    start = 'ATG'
    stop = ['TAA', 'TAG', 'TGA']
    valid_nucleotides = ['A', 'T', 'G', 'C']
    for nt in set(genome):
        if nt not in valid_nucleotides:
            raise TypeError("Invalid DNA sequence")
def complementary(pro):
new=''
dictp={}
dictp['A']='T'
dictp['T']='A'
dictp['G']='C'
dictp['C']='G'
for i in range(len(pro)):
new=new+dictp[pro[i]]
return new
def read1(genome,frame,rev):
if(rev):
genome=complementary(genome[::-1])
i=frame-1
lst=[]
ind=[]
gg=genome+genome
while(True):
x=gg[i:i+3]
i+=3
lst.append(x)
ind.append((i-3)%len(genome))
if((x in stop) and i>=len(genome)):
break
return(lst,ind)
f1_f,ind1_f=read1(genome,1,False)
f2_f,ind2_f=read1(genome,2,False)
f3_f,ind3_f=read1(genome,3,False)
f1_b,ind1_b=read1(genome,1,True)
f2_b,ind2_b=read1(genome,2,True)
f3_b,ind3_b=read1(genome,3,True)
def find_orf_in_frame(dat,ind,frame,rev):
lst=[]
supp=[]
hh=0
state=-1 #no start found yet
for i in range(len(dat)):
if(dat[i]==start):
if(state==-1):
state=ind[i]
hh=i
elif(dat[i] in stop):
if(state!=-1):
lst.append([state,ind[i]])
supp.append([hh,i])
state=-1
answer=[]
kkk=0
for i in lst:
start1=i[0]
stop1=(i[1]+2)%len(genome)
if(rev):
start1=len(genome)-1-start1
stop1=len(genome)-1-stop1
orf=""
for jj in range(supp[kkk][0],(supp[kkk][1]+1)):
orf+=code_dict[dat[jj]]
if(len(orf)>=34):
answer.append((start1,stop1,orf,rev))
kkk+=1
return answer
l1=find_orf_in_frame(f1_f,ind1_f,1,False)
l2=find_orf_in_frame(f2_f,ind2_f,2,False)
l3=find_orf_in_frame(f3_f,ind3_f,3,False)
l4=find_orf_in_frame(f1_b,ind1_b,1,True)
l5=find_orf_in_frame(f2_b,ind2_b,2,True)
l6=find_orf_in_frame(f3_b,ind3_b,3,True)
finlist=l1+l2+l3+l4+l5+l6
to_be_removed=[]
for i in range(len(finlist)):
for j in range(len(finlist)):
if(i!=j):
t1=finlist[i]
t2=finlist[j]
if(t1[1]==t2[1]):
if(len(t1[2])>len(t2[2])):
to_be_removed.append(j)
else:
to_be_removed.append(i)
    to_be_removed = list(set(to_be_removed))
    # delete from the back so earlier indices stay valid while removing
    for i in sorted(to_be_removed, reverse=True):
        del finlist[i]
return finlist
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
        allLen = 0
        for seq in self.__sequences:
            allLen += len(seq)
        return allLen / len(self.__sequences)
def read_fasta(self, path):
with open(path, 'r') as f:
seq = ''
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(':'):
if sequence_started:
seq = seq.replace('*', '')
self.__sequences.append(seq)
seq = ''
sequence_started = False
continue
sequence_started = True
seq += line.strip()
seq = seq.replace('*', '')
self.__sequences.append(seq)
def get_abs_frequencies(self):
absValues = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
        for seq in self.__sequences:
            for aa in seq:
                absValues[aa] += 1
        return absValues
    def get_av_frequencies(self):
        # relative frequencies: absolute counts divided by the total residue count
        av_values = self.get_abs_frequencies()
        total = sum(len(seq) for seq in self.__sequences)
        for aa in av_values:
            av_values[aa] = av_values[aa] / total
        return av_values
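if __name__ == "__main__":
    # Usage sketch; the FASTA path below is a hypothetical example file.
    dist = AADist("tests/tests.fasta")
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_av_frequencies())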
<file_sep>##############
# Exercise 2.6
##############
import helper
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
    def __init__(self, filepath):
        # keep state per instance rather than as shared class attributes
        self.sequences = []
        self.abs_freqs = {}
        self.av_freqs = {}
        self.read_fasta(filepath)
def get_counts(self):
return len(self.sequences)
def get_average_length(self):
totalAACount = 0
for seq in self.sequences:
totalAACount = totalAACount + len(seq)
return totalAACount/float(self.get_counts())
def read_fasta(self, path):
self.sequences = helper.read_fasta(path)
def get_abs_frequencies(self):
# return number of occurences not normalized by length
totalAAseq = ''
for seq in self.sequences:
totalAAseq += seq
self.abs_freqs = Counter(totalAAseq)
return self.abs_freqs
    def get_av_frequencies(self):
        # return number of occurrences normalized by length; recompute the
        # absolute counts here so this works without a prior call to
        # get_abs_frequencies()
        abs_freqs = self.get_abs_frequencies()
        total_aa_count = 0
        for seq in self.sequences:
            total_aa_count = total_aa_count + len(seq)
        for key in abs_freqs:
            self.av_freqs[key] = abs_freqs[key] / total_aa_count
        return self.av_freqs
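# Note: unlike the variant above, this implementation delegates FASTA parsing
# to the exercise-provided helper.read_fasta and uses collections.Counter for
# the absolute counts.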
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.combinations = []
for firstLetter in ALPHABET:
for secondLetter in ALPHABET:
for thirdLetter in ALPHABET:
self.combinations.append(firstLetter+secondLetter+thirdLetter)
self.array_of_different_words = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
        # only store words that actually occur in the new sequence (this is
        # what the previously commented-out condition intended; without it,
        # all 20^3 possible words end up in the list)
        for combination in self.combinations:
            if combination in sequence and combination not in self.array_of_different_words:
                self.array_of_different_words.append(combination)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
results = []
for sequence in self.sequences:
if word in sequence:
results.append(sequence)
return results
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
number_of_words_per_sequence = 0
number_of_sequences_per_word = 0
different_words = []
for sequence in self.sequences:
for x in range(0, len(sequence)-2):
word = sequence[x:x+3]
different_words.append(word)
number_of_words_per_sequence += len(self.get_words_in_sequence(sequence))
different_words = list(dict.fromkeys(different_words))
for combination in self.combinations:
number_of_sequences_per_word += len(self.get_sequences(combination))
return (
len(self.sequences),
len(different_words),
int(round(number_of_words_per_sequence/len(self.sequences))),
int(round(number_of_sequences_per_word/len(self.array_of_different_words)))
)
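    # Example with the sequences added in main() below ('SEQW', 'SEQM', 'RRRR'):
    # the unique words are SEQ, EQW, EQM and RRR, giving stats of (3 sequences,
    # 4 words, round(5/3) = 2 words per sequence, round(5/4) = 1 sequences
    # per word).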
def get_words_in_sequence(self, sequence):
different_words = []
for x in range(0, len(sequence)-2):
word = sequence[x:x+3]
different_words.append(word)
different_words = list(dict.fromkeys(different_words))
return different_words
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
self.combinations = []
for firstLetter in ALPHABET:
for secondLetter in ALPHABET:
for thirdLetter in ALPHABET:
self.combinations.append(firstLetter+secondLetter+thirdLetter)
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
uniqueWords = []
if pssm is None:
            for x in range(0, len(sequence) - 2):
                word = sequence[x:x + 3]
                for combination in self.combinations:
                    # score the query word against each candidate 3-mer
                    score = (self.substitution_matrix[AA_TO_INT[word[0]]][AA_TO_INT[combination[0]]]
                             + self.substitution_matrix[AA_TO_INT[word[1]]][AA_TO_INT[combination[1]]]
                             + self.substitution_matrix[AA_TO_INT[word[2]]][AA_TO_INT[combination[2]]])
                    if score >= T:
                        uniqueWords.append(combination)
            return list(dict.fromkeys(uniqueWords))
else:
            for x in range(0, len(pssm) - 2):
                for combination in self.combinations:
                    score = (pssm[x][AA_TO_INT[combination[0]]]
                             + pssm[x + 1][AA_TO_INT[combination[1]]]
                             + pssm[x + 2][AA_TO_INT[combination[2]]])
                    if score >= T:
                        uniqueWords.append(combination)
            return list(dict.fromkeys(uniqueWords))
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
def main():
print('BlastDb')
blastDB = BlastDb()
blastDB.add_sequence("SEQW")
blastDB.add_sequence("SEQM")
blastDB.add_sequence("RRRR")
    print(blastDB.get_db_stats())
if __name__ == '__main__':
main()
<file_sep>import numpy as np
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
# list of sequences stored in DB
self.sequences = []
# unique words found in all sequences
self.unique_words = set()
# list of unique words per sequence (each object in list represents 1 sequence in same order as self.sequences)
self.unique_words_ordered = []
#dict of all word in sequences. Each key has list of sequences in DB the word is contained in
self.word_dict = {} #{word1: [seq1, seq2]}
self.word_target_index = {}
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
#add sequence
self.sequences.append(sequence)
word_list = []
words_set = set()
curr_word_dict = {}
# curr_word_dict_index = {}
# get additional info
for foo in range(len(sequence)-2):
curr_word = sequence[foo:foo+3]
# add word to list of all words (inkl duplicates) in sequence
word_list.append(curr_word)
# add word to set of all unique words in sequence
words_set.add(curr_word)
# add word to set of all unique words in DB
self.unique_words.add(curr_word)
# add each sequence to sequence dict (ordered by words)
if curr_word not in curr_word_dict:
curr_word_dict[curr_word] = []
curr_word_dict[curr_word].append(sequence)
if curr_word not in self.word_target_index:
self.word_target_index[curr_word]=[]
self.word_target_index[curr_word].append((sequence, foo))
#add sequence to DB dict (ordered by words), sequences can occur several times if they are in the DB several times
for bar in curr_word_dict:
if bar not in self.word_dict:
self.word_dict[bar] = []
self.word_dict[bar].append(sequence)
# add new word set to list of word sets for all sequences
self.unique_words_ordered.append(words_set)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return self.word_dict[word]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
av_word_count = 0.0 # total number of unique words found per sequence/ number of sequences in DB
av_seq_count = 0.0 # total number of sequences each word is in/ number of unique words
# get average word dividend (word total)
for seq in range(len(self.unique_words_ordered)):
av_word_count += len(self.unique_words_ordered[seq])
# get average sequence dividend (sequence total)
for word in self.unique_words:
curr = self.word_dict[word]
av_seq_count += len(curr)
#calculate averages
av_word_count = av_word_count / len(self.sequences)
av_seq_count = av_seq_count / len(self.unique_words)
return (len(self.sequences), len(self.unique_words), int(round(av_word_count)), int(round(av_seq_count)))
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
# substitution matrix given
self.substitution_matrix = substitution_matrix
# highest value in substitution matrix
self.sub_high = self.substitution_matrix[0][0]
# calculate value
for foo in range(len(self.substitution_matrix)):
for bar in range(len(self.substitution_matrix[foo])):
if self.substitution_matrix[foo][bar] > self.sub_high:
self.sub_high = self.substitution_matrix[foo][bar]
self.search1_results = {}
self.word_pos_query_pairs = []
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
self.search1_results = {}
self.word_pos_query_pairs = []
local_word_pos_pairs = []
        # note: the sequence and PSSM branches below share the same pruning logic
# unique words above threshold in query sequence
words = set()
# sequence is given
if sequence:
#look at each word in query sequence and find all words above threshold
for foo in range(len(sequence) - 2):
#get current word
curr_word = sequence[foo:foo + 3]
#get indices in substitution matrix of current word
index1 = AA_TO_INT[curr_word[0]]
index2 = AA_TO_INT[curr_word[1]]
index3 = AA_TO_INT[curr_word[2]]
#look at first character in current word
for in1 in range(len(self.substitution_matrix)):
#calc score of first character
curr_score = self.substitution_matrix[index1][in1]
score1 = curr_score
#make sure there is a chance for the score to be above the threshold if current word is optimal
if score1 + 2*self.sub_high >= T:
#look at 2nd character in current word
for in2 in range(len(self.substitution_matrix)):
curr_score = self.substitution_matrix[index2][in2]
score2 = score1 + curr_score
# make sure there is a chance for the score to be above the threshold if current word is optimal
if score2 + self.sub_high >= T:
# look at 3rd character in current word
for in3 in range(len(self.substitution_matrix)):
curr_score = self.substitution_matrix[index3][in3]
score3 = score2 + curr_score
#check if end score is above threshold
if score3 >= T:
#get found word
wor = INT_TO_AA[in1]+INT_TO_AA[in2]+INT_TO_AA[in3]
#add found word to list of unique words in sequence
words.add(wor)
#add found word and location of word it can replace
local_word_pos_pairs.append((wor, foo, score3))
#pssm is given
else:
#get highest value in pssm
highest = pssm[0][0]
for foo in range(len(pssm)):
for bar in range(len(pssm[foo])):
if pssm[foo][bar] > highest:
highest = pssm[foo][bar]
#look at each "word" in pssm and find all words above threshold
for foo in range(len(pssm) - 2):
# get indices in pssm of current word
index1 = foo
index2 = foo + 1
index3 = foo + 2
# look at first character in current word
for in1 in range(20):
curr_score = pssm[index1][in1]
score1 = curr_score
# make sure there is a chance for the score to be above the threshold if current word is optimal
if score1 + 2 * highest >= T:
# look at 2nd character in current word
for in2 in range(20):
curr_score = pssm[index2][in2]
score2 = score1 + curr_score
# make sure there is a chance for the score to be above the threshold if current word is optimal
if score2 + highest >= T:
# look at 3rd character in current word
for in3 in range(20):
curr_score = pssm[index3][in3]
score3 = score2 + curr_score
# check if end score is above threshold
if score3 >= T:
# get found word
wor = INT_TO_AA[in1] + INT_TO_AA[in2] + INT_TO_AA[in3]
# add found word to list of unique words in sequence
words.add(wor)
# add found word and location of word it can replace
local_word_pos_pairs.append((wor, foo, score3))
self.word_pos_query_pairs = local_word_pos_pairs
return list(words)
def calc_score(self, word1, word2):
score = self.substitution_matrix[AA_TO_INT[word1[0]]][AA_TO_INT[word2[0]]]
score += self.substitution_matrix[AA_TO_INT[word1[1]]][AA_TO_INT[word2[1]]]
score += self.substitution_matrix[AA_TO_INT[word1[2]]][AA_TO_INT[word2[2]]]
return score
def get_all_indices(self, word, sequence):
res = []
for foo in range(len(sequence)-2):
curr = sequence[foo:foo + 3]
if curr == word:
res.append(foo)
return res
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
unique_target_words = self.get_words(sequence=query, pssm=pssm, T=T)
#get word, targetseq, location
for pair in self.word_pos_query_pairs:
tar_word = pair[0]
query_ind = pair[1]
seq_loc_pairs = blast_db.word_target_index[tar_word]
for seq_loc in seq_loc_pairs:
score = pair[2]
tar_seq = seq_loc[0]
tar_ind = seq_loc[1]
#move right
hsp, score, end_tar_ind, end_qu_ind = self.get_score(tar_ind + 3, tar_seq, query_ind + 3, query, score, X, pssm, tar_word, 1)
# move left
hsp, score, start_tar_ind, start_qu_ind = self.get_score(tar_ind - 1, tar_seq, query_ind - 1, query, score, X, pssm, hsp, -1)
if score >= S:
if not tar_seq in self.search1_results:
self.search1_results[tar_seq] = set()
self.search1_results[tar_seq].add((start_qu_ind, start_tar_ind, len(hsp), score))
return self.search1_results
def get_score(self, tar_ind, tar_seq, query_ind, query_seq, score, X, pssm, hsp, direc):
high_score = score
start_tar_ind = tar_ind
if direc > 0:
high_tar = tar_ind-3
high_qu = query_ind-3
else:
high_tar = tar_ind+1
high_qu = query_ind+1
local_hsp = ""
if query_seq:
length_query_pssm = len(query_seq)
else:
length_query_pssm = len(pssm)
if direc > 0:
while (query_ind < length_query_pssm) and (tar_ind < len(tar_seq)) and (score > (high_score - X)):
local_hsp += tar_seq[tar_ind]
tar_ind, query_ind, high_score, score, local_hsp, hsp, high_tar, high_qu = self.calc_new_scores(pssm, query_seq, hsp, score, high_score, local_hsp, query_ind, tar_seq, tar_ind, direc, high_tar, high_qu)
else:
while (query_ind >= 0) and (tar_ind >= 0) and (score > (high_score - X)):
local_hsp = tar_seq[tar_ind] + local_hsp
tar_ind, query_ind, high_score, score, local_hsp, hsp, high_tar, high_qu = self.calc_new_scores(pssm, query_seq, hsp, score, high_score, local_hsp, query_ind, tar_seq, tar_ind, direc, high_tar, high_qu)
return hsp, high_score, high_tar, high_qu
def calc_new_scores(self, pssm, query_seq, hsp, score, high_score, local_hsp, query_ind, tar_seq, tar_ind, direc, high_tar, high_qu):
if query_seq:
score = score + self.substitution_matrix[AA_TO_INT[query_seq[query_ind]]][AA_TO_INT[tar_seq[tar_ind]]]
else:
score = score + pssm[query_ind][AA_TO_INT[tar_seq[tar_ind]]]
if score > high_score:
high_score, hsp, local_hsp = self.get_new_highscore(score, hsp, local_hsp, direc)
high_tar = tar_ind
high_qu = query_ind
tar_ind += direc
query_ind += direc
return tar_ind, query_ind, high_score, score, local_hsp, hsp, high_tar, high_qu
def get_new_highscore(self, score, hsp, local_hsp, direc):
high_score = score
if direc > 0:
hsp += local_hsp
else:
hsp = local_hsp + hsp
return high_score, hsp, ""
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
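if __name__ == '__main__':
    # Usage sketch. The sequences and the toy +4/-1 substitution matrix are
    # illustrative stand-ins for the real exercise inputs (e.g. a BLOSUM
    # matrix).
    db = BlastDb()
    db.add_sequence('MGPRARPAFLLLMLLQTAVL')
    db.add_sequence('MGAAARTLRLALGLLLLATL')
    print(db.get_db_stats())
    toy_matrix = [[4 if i == j else -1 for j in range(20)] for i in range(20)]
    blast = Blast(toy_matrix)
    print(blast.get_words(sequence='MGPRARPAFL', T=11))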
<file_sep>##############
# Exercise 2.5
##############
codontable = {
"TTT" : "F", "TTC" : "F", "TTA" : "L", "TTG" : "L",
"CTT" : "L", "CTC" : "L", "CTA" : "L", "CTG" : "L",
"ATT" : "I", "ATC" : "I", "ATA" : "I", "ATG" : "M",
"GTT" : "V", "GTC" : "V", "GTA" : "V", "GTG" : "V",
"TCT" : "S", "TCC" : "S", "TCA" : "S", "TCG" : "S",
"CCT" : "P", "CCC" : "P", "CCA" : "P", "CCG" : "P",
"ACT" : "T", "ACC" : "T", "ACA" : "T", "ACG" : "T",
"GCT" : "A", "GCC" : "A", "GCA" : "A", "GCG" : "A",
"TAT" : "Y", "TAC" : "Y", "TAA" : "_", "TAG" : "_",
"CAT" : "H", "CAC" : "H", "CAA" : "Q", "CAG" : "Q",
"AAT" : "N", "AAC" : "N", "AAA" : "K", "AAG" : "K",
"GAT" : "D", "GAC" : "D", "GAA" : "E", "GAG" : "E",
"TGT" : "C", "TGC" : "C", "TGA" : "_", "TGG" : "W",
"CGT" : "R", "CGC" : "R", "CGA" : "R", "CGG" : "R",
"AGT" : "S", "AGC" : "S", "AGA" : "R", "AGG" : "R",
"GGT" : "G", "GGC" : "G", "GGA" : "G", "GGG" : "G"
}
# complement each base: A<->T, G<->C (the original arithmetic trick computed
# exactly this mapping, just unreadably)
complementary = lambda y: ''.join({'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}[x] for x in y)
codons_to_aa=lambda y:''.join(map(codontable.get,[y[i:i+3] for i in range(0, len(y), 3)]))
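# Quick sanity checks for the two helpers above (illustrative):
assert complementary('ATGC') == 'TACG'
assert codons_to_aa('ATGGCCTAA') == 'MA_'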
def circular(genome):
refined = ["".join([genome]*3)[i:i+3] for i in range(0, len("".join([genome]*3)), 3)]
while True:
for ns in refined:
yield ns
def get_orfs(genome):
def c():
refined = ["".join([genome] * 3)[i:i + 3] for i in range(0, len("".join([genome] * 3)), 3)]
while True:
for ns in refined:
yield ns
def rc():
refined = [complementary("".join([genome[::-1]] * 3)[i:i + 3]) for i in range(0, len("".join([complementary(genome[::-1])] * 3)), 3)]
while True:
for ns in refined:
yield ns
iterator = c()
reverse_iterator = rc()
partial_dnas = []
transcribing = False
part_dna = ''
start_index = None
try:
codons_to_aa(genome+genome+genome)
except:
raise TypeError("Input not a DNA sequence!")
for index in range(0,len(genome)*4,3):
item = next(iterator)
        if item == 'ATG' and not transcribing:
            start_index = int(index)
            transcribing = True
            part_dna += codontable[item]  # 'ATG' translates to 'M'
            continue
if item in ['TAA','TAG','TGA']:
transcribing = False
if (len(part_dna) > 33):
partial_dnas.append((start_index%len(genome), (int(index)+2)%len(genome), part_dna, False))
part_dna = ''
start_index = None
continue
if transcribing: part_dna +=''.join(map(codontable.get,[item[i:i+3] for i in range(0, len(item), 3)]))
for index in range(len(genome)*4,0,-3):
item = next(reverse_iterator)
        if item == 'ATG' and not transcribing:
            start_index = int(index)
            transcribing = True
            part_dna += codontable[item]  # 'ATG' translates to 'M'
            continue
if item in ['TAA','TAG','TGA']:
transcribing = False
if (len(part_dna) > 33):
partial_dnas.append(((start_index-1)%len(genome),(int(index)-3)%len(genome), part_dna, True))
part_dna = ''
start_index = None
continue
if transcribing: part_dna +=''.join(map(codontable.get,[item[i:i+3] for i in range(0, len(item), 3)]))
final = list(set(partial_dnas))
final = sorted(final, key=lambda tup: tup[0])
final = sorted(final, key=lambda tup: tup[3])
return(final[:-1])<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.max_val = 0
self.max_indicies = []
self.start_indices = []
self.align()
self.alignmentList = []
self.alignment_finder(self.max_indicies[0][0],self.max_indicies[0][1],"","")
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
row_length = len(self.string2) + 1
col_length = len(self.string1) + 1
# row
for i in range(row_length):
self.score_matrix[i, 0] = 0
# col
for i in range(col_length):
self.score_matrix[0, i] = 0
for i in range(1, row_length):
for j in range(1, col_length):
item_score = self.substitution_matrix[self.string1[j-1]][self.string2[i-1]]
match = self.score_matrix[i-1, j-1] + item_score
delete = self.score_matrix[i-1, j] + self.gap_penalty
insert = self.score_matrix[i, j-1] + self.gap_penalty
self.score_matrix[i ,j] = max(match, delete, insert,0)
# find max_val
self.max_val = np.amax(self.score_matrix)
self.max_indicies = np.argwhere(self.score_matrix == self.max_val)
def alignment_finder(self, i, j, alignment1, alignment2):
# found
if i == 0 and j == 0:
if self.score_matrix[i, j] == 0:
self.alignmentList.append((alignment1, alignment2))
self.start_indices.append((i,j))
return
if self.score_matrix[i,j] == 0:
self.alignmentList.append((alignment1, alignment2))
self.start_indices.append((i,j))
return
# top
elif i == 0:
self.alignment_finder(i, j - 1, self.string1[j-1] + alignment1, "-" + alignment2)
# leftmost
elif j == 0:
self.alignment_finder(i-1, j, "-" + alignment1, self.string2[i-1] + alignment2)
# middle
else:
# up
if self.score_matrix[i-1,j] + self.gap_penalty == self.score_matrix[i,j]:
self.alignment_finder(i-1, j, "-" + alignment1, self.string2[i-1] + alignment2)
# left
if self.score_matrix[i, j-1] + self.gap_penalty == self.score_matrix[i,j]:
self.alignment_finder(i, j-1, self.string1[j-1] + alignment1, "-" + alignment2)
# diag
if self.score_matrix[i-1, j-1] + self.substitution_matrix[self.string2[i-1]][self.string1[j-1]] == self.score_matrix[i,j]:
self.alignment_finder(i-1, j-1, self.string1[j-1] + alignment1, self.string2[i-1] + alignment2)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.max_val > 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignmentList[0]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
if self.start_indices[0][1] <= residue_index and self.max_indicies[0][1] >= residue_index:
return True
else:
return False
if string_number == 2:
if self.start_indices[0][0] <= residue_index+1 and min(self.max_indicies[0][0],len(self.string2)-1) >= residue_index+1:
return True
else:
return False
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
# at least on seq
if len(sequences) == 0: raise TypeError
# all seq has same length
length = len(sequences[0])
for seq in sequences:
if len(seq)!=length: raise TypeError
# all seq contain validate amino acid include gap char, i.e in alphabet
for seq in sequences:
ss = set(seq)
for s in ss:
if s not in ALPHABET: raise TypeError
self.sequences = sequences
# compute msa_mat,(n_seq,length)
n_sequence,length = self.get_size()
msa_mat = np.zeros((n_sequence,length))
for i in range(n_sequence):
for j in range(length):
msa_mat[i,j] = AA_TO_INT[sequences[i][j]]
self.msa_mat = msa_mat
self.sequences_weights = self.get_sequence_weights()
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
        # check the bg_matrix
        if bg_matrix is None:
backgroud_prob = np.full((20,),0.05)
substitution_prob = np.full((20,20),0.0025)
else:
backgroud_prob = np.sum(bg_matrix,axis=0)
substitution_prob = np.asarray(bg_matrix)
        # calculate the basic PSSM:
        # count observed amino acids, including gaps
n_sequences,length = self.get_size()
basic_pssm = np.zeros((length,21))
for i in range(length):
for j,seq in enumerate(self.sequences):
if use_sequence_weights:
# apply sequence weight
sequence_weight = self.sequences_weights
basic_pssm[i, AA_TO_INT[seq[i]]] = basic_pssm[i, AA_TO_INT[seq[i]]]+sequence_weight[j]
else: basic_pssm[i,AA_TO_INT[seq[i]]] = basic_pssm[i,AA_TO_INT[seq[i]]] +1
        # redistribute gaps according to background frequency
if redistribute_gaps:
# gaps_columns = basic_pssm[:,-1]*0.05
gaps_columns = basic_pssm[:,-1]
# gap_mat = np.tile(gaps_columns,(21,1)).T
gap_mat = np.zeros((length,21))
for i in range(20):
gap_mat[:,i] = gaps_columns*backgroud_prob[i]
# gap_mat[:,-1] = 0
basic_pssm = basic_pssm + gap_mat
# add weighted pseudocounts
if add_pseudocounts:
msa_mat = self.msa_mat
g = np.zeros((length,21))
for i in range(length):
f = basic_pssm[i,0:-1]
g[i,0:-1] = np.dot(f/backgroud_prob,substitution_prob)
# weighted pc
N = self.get_number_of_observations()
alpha = N -1
basic_pssm = (alpha*basic_pssm + beta*g)/(alpha+beta)
# normalize
row_sum = np.sum(basic_pssm,axis=1)-basic_pssm[:,-1]
row_sum = np.tile(row_sum,(21,1))
basic_pssm = basic_pssm/row_sum.T
# delete last gap column
basic_pssm = basic_pssm[:,0:-1]
# divided by background f = 0.05
# basic_pssm = basic_pssm/0.05
basic_pssm = basic_pssm/backgroud_prob
        # log-odds: 2 * log2(frequency / background)
basic_pssm = 2*np.log2(basic_pssm)
pssm = basic_pssm
        # delete rows corresponding to gaps in the primary sequence
        gap_rows = [i for i, c in enumerate(self.sequences[0]) if c == "-"]
pssm = np.delete(pssm,gap_rows,axis=0)
# # delete last gap column
# pssm = pssm[:,0:-1]
# replace -inf with -20
pssm = np.where(pssm != -np.inf, pssm, -20)
return np.round(pssm).astype(np.int64)
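    # Refinement order implemented above: (1) count observed residues (with
    # optional sequence weighting); (2) redistribute gap counts according to
    # background frequencies; (3) add beta-weighted pseudocounts; (4) normalize
    # each row; (5) divide by background frequencies; (6) take 2*log2, replace
    # -inf with -20 and round to int64.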
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
n_sequence = len(self.sequences)
length = len(self.sequences[0])
return (n_sequence, length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
primary = ''.join(c for c in self.sequences[0] if c!="-" )
return primary
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
n_sequence, length = self.get_size()
# construct a matrix w_ik
weight_mat = np.zeros((length,n_sequence))
msa_mat = self.msa_mat
for i in range(length):
# for every row i, (column in MSA)
column_i = msa_mat[:,i]
r_i = len(set(column_i))
s_ik = np.zeros(n_sequence)
for k in range(n_sequence):
equ = np.where(column_i == column_i[k])[0]
# equ = [i for i, aa in enumerate(column_i) if aa == column_i[k]]
s_ik[k] = len(equ)
if (r_i != 1): weight_mat[i, :] = 1 / (s_ik * r_i)
weights = np.sum(weight_mat,axis=0)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
n_sequence, length = self.get_size()
sum_r = 0
msa_mat = self.msa_mat
for i in range(length):
column_i = msa_mat[:, i]
r_i = len(set(column_i))
sum_r = sum_r + r_i
num_obs = sum_r/length
return num_obs
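# Worked example for get_sequence_weights(): for the toy MSA used in the demo
# below (["SE-AN", "SE-ES", "SEVEN", "SE-AS"]), column 3 holds A, E, E, A, so
# r = 2 residue types and each sequence contributes 1/(s*r) = 1/(2*2) = 0.25
# there; column 0 is all S (r = 1) and contributes nothing, as coded above.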
if __name__ == '__main__':
import json
with open("/Users/wangyu/Documents/Protein_prediction/exercise/pp1ss19exercise4-exercise-ge56sen/tests/pssm_test.json") as f:
json_data = json.load(f)
msa = MSA(json_data["msa_sequences"])
seq = ["SE-AN","SE-ES","SEVEN","SE-AS"]
msa = MSA(seq)
a = msa.get_pssm()
print(a)
print(msa.msa_mat)
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.SeqUtils import seq1
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
CIF_PARSER = MMCIFParser # parser object for reading in structure in CIF format
def __init__( self, path ):
'''
Initialize every PDB_Parser with a path to a structure-file in CIF format.
An example file is included in the repository (7ahl.cif).
Tip: Store the parsed structure in an object variable instead of parsing it
again & again ...
'''
self.structure = self.CIF_PARSER().get_structure(path.split('/')[-1].split('.cif')[0], path)
# 3.8 Chains
def get_number_of_chains( self ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
Return:
Number of chains in this structure as integer.
'''
n_chains = len(self.structure.child_list[0].child_list)
return n_chains
# 3.9 Sequence
def get_sequence( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
in a Biopython.PDB structure as a string.
'''
sequence = []
chain = self.structure.child_list[0].child_dict[chain_id]
for triplet in chain.child_list:
if triplet.resname != 'HOH':
sequence.extend(triplet.resname)
return seq1(''.join(sequence))
# 3.10 Water molecules
def get_number_of_water_molecules( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the number of water molecules of a given chain (chain_id)
in a Biopython.PDB structure as an integer.
'''
n_waters = 0
chain = self.structure.child_list[0].child_dict[chain_id]
for triplet in chain.child_list:
if triplet.resname == 'HOH':
n_waters += 1
return n_waters
# 3.11 C-Alpha distance
def get_ca_distance( self, chain_id_1, index_1, chain_id_2, index_2 ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_1 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
index_2 : index of a residue in a given chain in a Biopython.PDB structure
chain_id_1 and index_1 describe precisely one residue in a PDB structure,
chain_id_2 and index_2 describe the second residue.
Return:
Return the C-alpha (!) distance between the two residues, described by
chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
The reason for using two different chains as an input is that also the distance
between residues of different chains can be interesting.
Different chains in a PDB structure can either occur between two different proteins
(Heterodimers) or between different copies of the same protein (Homodimers).
'''
coord1 = self.structure.child_list[0].child_dict[chain_id_1][index_1].child_dict['CA'].coord
coord2 = self.structure.child_list[0].child_dict[chain_id_2][index_2].child_dict['CA'].coord
ca_distance = np.linalg.norm(coord1 - coord2)
return int(ca_distance)
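    # Example: main() below measures the CA-CA distance between residue 121 of
    # chain A and residue 120 of chain E in the bundled 7ahl.cif structure.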
# 3.12 Contact Map
def get_contact_map( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return a complete contact map (see description in exercise sheet)
for a given chain in a Biopython.PDB structure as numpy array.
The values in the matrix describe the c-alpha distance between all residues
in a chain of a Biopython.PDB structure.
Only integer values of the distance have to be given (see below).
'''
chain = self.structure.child_list[0].child_dict[chain_id]
valid_id = []
for residue in chain.get_residues():
if residue.id[0] == ' ':
valid_id.append(residue.id)
length = len(valid_id)
contact_map = np.zeros((length, length), dtype=np.float32)
for i, id_1 in enumerate(valid_id):
for j, id_2 in enumerate(valid_id):
contact_map[i, j] = self.get_ca_distance(chain_id, id_1, chain_id, id_2)
        return contact_map.astype(int)
# 3.13 B-Factors
def get_bfactors( self, chain_id ):
'''
Input:
self: Use Biopython.PDB structure which has been stored in an object variable
chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
depends on the specific protein and the resulting structure)
Return:
Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
The B-Factors describe the mobility of an atom or a residue.
In a Biopython.PDB structure B-Factors are given for each atom in a residue.
Calculate the mean B-Factor for a residue by averaging over the B-Factor
of all atoms in a residue.
Sometimes B-Factors are not available for a certain residue;
(e.g. the residue was not resolved); insert np.nan for those cases.
Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
The returned data structure has to be a numpy array rounded again to integer.
'''
chain = self.structure.child_list[0].child_dict[chain_id]
length = len(chain.child_list) - self.get_number_of_water_molecules(chain_id)
b_factors = np.zeros((length,), dtype=np.float32)
for i in range(length):
residue = chain.child_list[i]
n_atoms = len(residue.child_list)
for atom in residue.child_list:
if np.isnan(atom.bfactor):
b_factors[i] = np.nan
break
else:
b_factors[i] += atom.bfactor / n_atoms
b_mean = np.nanmean(b_factors)
b_var = np.nanvar(b_factors)
b_factors = (b_factors - b_mean) / np.sqrt(b_var)
        return b_factors.astype(int)  # return rounded (integer) values
def main():
print('PDB parser class.')
parser = PDB_Parser("tests/7ahl.cif")
parser.get_number_of_water_molecules('A')
parser.get_ca_distance('A', 121, 'E', 120)
parser.get_contact_map('E')
return None
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.5
##############
from itertools import zip_longest
genetic_code = {
'TTT': 'F', 'TTC': 'F',
'TTA': 'L', 'TTG': 'L', 'CTT': 'L', 'CTA': 'L', 'CTG': 'L', 'CTC': 'L',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I',
'ATG': 'M',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'AGT': 'S', 'AGC': 'S',
'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'TAT': 'Y', 'TAC': 'Y',
'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q',
'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K',
'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E',
'TGT': 'C', 'TGC': 'C',
'TGG': 'W',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'AGA': 'R', 'AGG': 'R',
'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G'
}
def __valid_base(base):
return base == 'A' or base == 'C' or base == 'G' or base == 'T'
def __is_valid_dna_seq(genome):
return all([__valid_base(x) for x in genome])
def __translate(triplet):
return genetic_code.get(triplet, None)
def __chunks(iterator, chunk_size):
elems = [iter(iterator)] * chunk_size
return zip_longest(*elems, fillvalue=None)
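# e.g. __chunks('ATGGCC', 3) yields ('A', 'T', 'G') and then ('G', 'C', 'C');
# a trailing incomplete chunk would be padded with None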
def __to_polypeptide(genome):
res = []
read_num = 0
for chunk in __chunks(genome, 3):
        count_none = sum(1 for x in chunk if x is None)
        if count_none > 0:
            # join before returning so every code path yields a string
            return str.join('', res), read_num
joined = str.join('', chunk)
trnsl = __translate(joined)
if not trnsl:
return str.join('', res), read_num + len(joined)
read_num += 3
res.append(trnsl)
return str.join('', res), read_num
def __complement(n):
if n == 'A':
return 'T'
elif n == 'T':
return 'A'
elif n == 'G':
return 'C'
elif n == 'C':
return 'G'
else:
return n
def get_orfs(genome):
genome = genome.upper()
if not __is_valid_dna_seq(genome):
raise TypeError
genome_length = len(genome)
genome = genome * 2
# start_codon = 'ATG'
indices = [i for i, _ in enumerate(genome) if
i < genome_length and genome[i] == 'A' and genome[i + 1] == 'T' and genome[i + 2] == 'G']
res = []
for start_idx in indices:
gen_start = genome[start_idx:]
parsed, read_num = __to_polypeptide(gen_start)
if len(parsed) > 33:
start = start_idx
end = start_idx + read_num - 1
if end >= genome_length:
end = end - genome_length
res.append((start, end, parsed, False))
rev_genome = str.join('', map(__complement, genome))[::-1]
# start_codon_reverse = 'TAC'
indices = [i for i, _ in enumerate(rev_genome) if
i < genome_length and rev_genome[i] == 'A' and rev_genome[i + 1] == 'T' and rev_genome[i + 2] == 'G']
for start_idx in indices:
gen_start = rev_genome[start_idx:]
parsed, read_num = __to_polypeptide(gen_start)
if len(parsed) > 33:
start = genome_length - start_idx - 1
end = genome_length - (start_idx + read_num)
        if end < 0:
            # wrap a negative coordinate around the circular genome
            end = end + genome_length
res.append((start, end, parsed, True))
final_res = []
for r in res:
start = r[0]
stop = r[1]
is_reverse = r[3]
filt = list(filter(lambda x: x[1] == stop and x[0] < start, final_res)) if is_reverse else list(
filter(lambda x: x[1] == stop and x[0] > start, final_res))
if len(filt) > 0:
# replace for same end, later start
to_replace = filt[0]
final_res.remove(to_replace)
filt = list(filter(lambda x: x[1] == stop, final_res))
if len(filt) > 0:
# ignore same end, earlier start
continue
final_res.append(r)
return final_res
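# Usage sketch (illustrative): get_orfs treats the genome as circular and
# returns (start, stop, peptide, is_reverse) tuples for ORFs longer than 33
# residues, e.g.:
#
#   orfs = get_orfs(open('tests/genome.txt').read().strip())
#   for start, stop, peptide, rev in orfs:
#       print(start, stop, len(peptide), rev)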
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
# Some "private" helper attributes
self._cols = len(string1) + 1
self._rows = len(string2) + 1
self._traceback_matrix = np.array([[None] * self._cols for i in range(self._rows)])
self._alignments = []
# Some "private" helper attributes
self.align()
def fill_matrices(self):
"""
Fill in each cell of the score matrix according to Smith-Waterman algorithn,
and create the corresponding traceback matrix
"""
score_top_left = 0 # Score value for a diagonal step
score_top = 0 # Score value for an up step
score_left = 0 # Score value for a left step
for row in range(1, self._rows, 1):
for col in range(1, self._cols, 1):
                # Smith-Waterman formula calculations
# Diagonal score
score_top_left = self.score_matrix[row - 1, col - 1] + self.substitution_matrix[self.string2[row-1]][self.string1[col-1]]
# Upper score
score_top = self.score_matrix[row - 1, col] + self.gap_penalty
# Left score
score_left = self.score_matrix[row, col - 1] + self.gap_penalty
# Cell's score
score = max(score_top_left, score_top, score_left, 0)
score2 = max(score_top_left, score_top, score_left)
self.score_matrix[row, col] = score
# Store step taken in traceback matrix
steps = []
if (score2 == score_top_left and score != 0):
steps.append("tl")
if (score2 == score_top and score != 0):
steps.append("tt")
if (score2 == score_left and score != 0):
steps.append("ll")
self._traceback_matrix[row, col] = steps
def __get_alignments(self):
"""
Get all the optimal alignments
"""
steps_stack = []
alignment_a = []
alignment_b = []
#indices = np.unravel_index(np.argmax(self.score_matrix[self._rows - 1], axis=None), self.score_matrix.shape)
indices = np.unravel_index(np.argmax(self.score_matrix, axis=None), self.score_matrix.shape)
if (indices == (0,0)):
return
row = indices[0] #row = self._rows - 1
col = indices[1]
current_cell = self.score_matrix[row, col]
while True:
# We reached the top left corner in the matrix, so we end the alignment
steps = []
if (current_cell == 0): #if (row, col) == (0, 0):
#if ("".join(alignment_b) != "" and "".join(alignment_a) != ""):
#alignment_a.insert(0, self.string2[row - 1])
#alignment_b.insert(0, self.string1[col - 1])
self._alignments.append(("".join(alignment_b), "".join(alignment_a)))
if (steps_stack == []): # End of alignment
break
else: # Perform step back in the matrix
(row, col, steps, alignment_a, alignment_b) = steps_stack.pop()
else: # Where do we move next?
if (self._traceback_matrix[row, col] != None):
steps = list(self._traceback_matrix[row, col])
if (steps != []):
step = steps.pop()
if (steps != []):
steps_stack.append((row, col, steps, list(alignment_a), list(alignment_b)))
if step == "tl":
alignment_a.insert(0, self.string2[row - 1])
alignment_b.insert(0, self.string1[col - 1])
current_cell = self.score_matrix[row -1, col - 1]
row -= 1
col -= 1
elif step == "tt":
alignment_b.insert(0, "-")
alignment_a.insert(0, self.string2[row - 1])
current_cell = self.score_matrix[row - 1, col]
row -= 1
elif step == "ll":
alignment_b.insert(0, self.string1[col - 1])
alignment_a.insert(0, "-")
current_cell = self.score_matrix[row, col - 1]
col -= 1
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
self.fill_matrices()
self.__get_alignments()
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
has_alignment = False
if (self._alignments != []):
has_alignment = True
return has_alignment
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
alignment = ("", "")
if (self._alignments != []):
alignment = self._alignments[0]
return alignment
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
is_residue_aligned = False
if (self._alignments != []):
string = ""
alignment = self._alignments[0][string_number - 1]
alignment_idx_in_str = -1
valid_params = False
if (string_number == 1 and (residue_index >=0 and residue_index < len(self.string1))):
string = self.string1
alignment_idx_in_str = string.find(alignment.replace("-", ""))
if (alignment_idx_in_str >= 0 and alignment_idx_in_str < len(alignment)):
valid_params = True
elif (string_number == 2 and (residue_index >=0 and residue_index < len(self.string2))):
string = self.string2
alignment_idx_in_str = string.find(alignment.replace("-", ""))
if (alignment_idx_in_str >= 0 and alignment_idx_in_str < len(alignment)):
valid_params = True
else:
valid_params = False
string = ""
if (valid_params):
residue = string[residue_index]
if residue == alignment[residue_index - alignment_idx_in_str]:
is_residue_aligned = True
return is_residue_aligned
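if __name__ == "__main__":
    # Usage sketch with a toy identity-style substitution matrix (hypothetical;
    # the exercise supplies a real matrix such as BLOSUM as a dict of dicts).
    aas = "ARNDCQEGHILKMFPSTWYV"
    identity = {a: {b: 3 if a == b else -1 for b in aas} for a in aas}
    la = LocalAlignment("AVNCCEGQHI", "ARNDEQ", -2, identity)
    print(la.has_alignment())
    print(la.get_alignment())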
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
        self.alignments = []
self.score = 0
self.string1 = string1
self.string2 = string2
self.len1 = len(self.string1)
self.len2 = len(self.string2)
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        self.score_matrix = np.zeros((self.len2 + 1, self.len1 + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
        # row 0 and column 0 are initialized with gap penalties inside the loop below
for i in range(0, self.len2+1):
for j in range(0, self.len1+1):
if i == 0 and j == 0 :
self.score_matrix[i][j] = 0
continue
if i==0:
self.score_matrix[i][j] = j * self.gap_penalty
continue
if j ==0:
self.score_matrix[i][j] = i * self.gap_penalty
continue
                is_match = self.string1[j - 1] == self.string2[i - 1]
                if is_match:
                    compScore = 1
                else:
                    compScore = 0
upScore = self.score_matrix[i][j-1] + self.gap_penalty
sideScore = self.score_matrix[i-1][j] + self.gap_penalty
diagScore = compScore + self.score_matrix[i-1][j-1]
self.score_matrix[i][j] = np.max([upScore, sideScore, diagScore])
# print("--------------------")
# print(self.score_matrix)
# print("--------------------")
        # the optimal global alignment score is the bottom-right cell
        self.score = int(self.score_matrix[self.len2][self.len1])
        i = self.len2
        j = self.len1
        str1 = ""
        str2 = ""
        while i > 0 and j > 0:
            score = self.score_matrix[i][j]
            diag = self.score_matrix[i - 1][j - 1]
            up = self.score_matrix[i][j - 1]
            side = self.score_matrix[i - 1][j]
            calc_score = self.score_func(self.string1[j - 1], self.string2[i - 1])
            if score == diag + calc_score:
                str1 += self.string1[j - 1]
                str2 += self.string2[i - 1]
                i = i - 1
                j = j - 1
            elif score == up + self.gap_penalty:
                str1 += self.string1[j - 1]
                str2 += '-'
                j = j - 1
            elif score == side + self.gap_penalty:
                str1 += '-'
                str2 += self.string2[i - 1]
                i = i - 1
while j> 0:
str1 += self.string1[j-1]
str2 += '-'
j = j - 1
while i>0:
str1 += '-'
str2 += self.string2[i-1]
i = i - 1
        str1 = str1[::-1]
        str2 = str2[::-1]
        self.alignments.append((str1, str2))
def score_func(self, firstId, secondId):
if firstId == secondId:
return 1
elif firstId == '-' or secondId == '-':
return -1
else:
return 0
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
# return 43
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
# return [
# ('ADMI-NS', 'ADMIRES'), ('ADMIN-S', 'ADMIRES')
# ]
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
# return [
# # [0, -1, -2, -3, -4, -5, -6],
# # [-1, 1, 0, -1, -2, -3, -4],
# # [-2, 0, 2, 1, 0, -1, -2],
# # [-3, -1, 1, 3, 2, 1, 0],
# # [-4, -2, 0, 2, 4, 3, 2],
# # [-5, -3, -1, 1, 3, 4, 3],
# # [-6, -4, -2, 0, 2, 3, 4],
# # [-7, -5, -3, -1, 1, 2, 4]
# ]
return self.score_matrix
def main():
    # Minimal smoke test. This implementation scores matches via score_func
    # and never reads the substitution matrix, so an empty dict serves as a
    # placeholder here.
    ga = GlobalAlignment("AVNCCEGQHI", "ARNDEQ", -1, {})
    print(ga.get_alignments())
    print(ga.get_best_score())
if __name__ == '__main__':
main()<file_sep>##############
# Exercise 2.6
##############
import collections
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_total_length(self):
return len("".join(self.__sequences))
def get_average_length(self):
return self.get_total_length() / self.get_counts()
def add_sequence(self, seq):
self.__sequences.append(seq)
def sequences(self):
return self.__sequences
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.add_sequence(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip("*\n")
self.add_sequence(seq)
    def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
        counts = collections.Counter("".join(self.__sequences))
        return counts
    def get_av_frequencies(self):
        freqs = self.get_abs_frequencies()
        for key in freqs:
            freqs[key] /= self.get_total_length()
        return freqs
    def aa_dist(self, orf):
        freqs = collections.Counter(orf)
        for key in freqs:
            freqs[key] /= len(orf)
        return freqs
# file_genome = "tests/genome.txt"
# file_fasta = "tests/tests.fasta"
# dist = AADist(file_fasta)
#
# print(dist.sequences())
# print(dist.get_counts())
# print(dist.get_average_length())
# print(dist.get_abs_frequencies())
# print(dist.get_av_frequencies())
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
# Counts the number of read sequences and returns the number as integer
return len(self.__sequences)
def get_average_length(self):
totalLength = 0.0
for seq_header, seq in self.__sequences:
totalLength = totalLength + len(seq)
return totalLength / self.get_counts()
def read_fasta(self, path):
# Takes a filename as parameter and reads in a file with multiple protein sequences in FASTA format
with open(path, "r") as f:
seq = ""
seq_header = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
if seq[-1] == "*":
seq = seq[:-1]
self.__add_sequence(seq_header, seq)
seq = ""
sequence_started = False
seq_header = line.strip()
continue
sequence_started = True
seq += line.strip()
if seq[-1] == "*":
seq = seq[:-1]
self.__add_sequence(seq_header, seq)
def get_abs_frequencies(self):
# Counts the occurrence for every amino acid over all proteins
        # return number of occurrences, not normalized by length
abs_freqs = Counter()
for seq_header, seq in self.__sequences:
abs_freqs = abs_freqs + Counter(seq)
return abs_freqs
def get_av_frequencies(self):
# Calculates the average amino acid composition over all read protein sequences
num_total_aa = 0
for seq_header, seq in self.__sequences:
num_total_aa = num_total_aa + len(seq)
dict_freqs = self.get_abs_frequencies()
for k,v in dict_freqs.items():
dict_freqs[k] = v / num_total_aa
return dict_freqs
def __add_sequence(self, seq_header, seq):
self.__sequences.append((seq_header, seq))<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def get_max_alg(self,i,y):
diag=self.score_matrix[i-1,y-1]+self.substitution_matrix[self.string1[y-1]][self.string2[i-1]]
left=self.score_matrix[i,y-1]+self.gap_penalty
up=self.score_matrix[i-1,y]+self.gap_penalty
if diag==max(diag,left,up,0):
self.trace[i][y]=1
if left==max(diag,left,up,0):
self.trace[i][y]=2
if up==max(diag,left,up,0):
self.trace[i][y]=3
return max(diag,left,up)
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
        self.trace = np.zeros((len(self.string2) + 1, len(self.string1) + 1), dtype=int)
for i in range(len(self.string2)+1):
self.score_matrix[i][0]=0
for i in range(len(self.string1)+1):
self.score_matrix[0][i]=0
##fill rest
for y in range (1,len(self.string1)+1):
for i in range(1,len(self.string2)+1):
self.score_matrix[i][y]=max(self.get_max_alg(i,y),0)
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
for i in range (len(self.string2)+1):
for y in range (1,len(self.string1)+1):
if self.score_matrix[i][y]>0:
return True
return False
def get_next_char(self,i,y,ali):
#ende
string2=ali[0]
string1=ali[1]
#print(string2,string1)
        if self.score_matrix[i][y] == 0:
            # A zero cell terminates the local alignment traceback.
            ali = (string1, string2)
            self.alignment = ali
            self.start_alignment = (y, i)
            return
        current_score = self.score_matrix[i][y]
        up = self.score_matrix[i-1][y] + self.gap_penalty
        diag = self.score_matrix[i-1][y-1] + self.substitution_matrix[self.string2[i-1]][self.string1[y-1]]
        left = self.score_matrix[i][y-1] + self.gap_penalty
if up==current_score:
self.get_next_char(i-1,y,(string2+self.string2[i-1],string1+"-"))
if diag==current_score:
self.get_next_char(i-1,y-1,(string2+self.string2[i-1],string1+self.string1[y-1]))
if left==current_score:
self.get_next_char(i,y-1,(string2+"-",string1+self.string1[y-1]))
def get_alignment(self):
end_i=0
end_y=0
max_score=np.amax(self.score_matrix)
for i in range (1,len(self.string2)+1):
for y in range (1,len(self.string1)+1):
if self.score_matrix[i][y]==max_score:
end_i=i
end_y=y
break
self.start_alignment=(-1,-1)
self.end_alignment=(end_y-1,end_i-1)
if max_score==0:
return ("","")
self.alignment=("","")
self.get_next_char(end_i,end_y,("",""))
st1=self.alignment[0][::-1]
st2=self.alignment[1][::-1]
return (st1,st2)
def get_alignment_old(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
max_score=np.amax(self.score_matrix)
for i in range (1,len(self.string2)+1):
for y in range (1,len(self.string1)+1):
if self.score_matrix[i][y]==max_score:
break
if max_score==0:
return ("","")
#print(max_score)
i=i-1
y=y-1
#self.ali=("","")
ali=("","")
print(self.score_matrix)
while True:
##find pred
current_score=self.score_matrix[i][y]
up=self.score_matrix[i-1][y]+self.gap_penalty
diag=self.score_matrix[i-1][y-1]+self.substitution_matrix[self.string2[i-1]][self.string1[y-1]]
left=self.score_matrix[i][y-1]+self.gap_penalty
not_found=True
if up==current_score:
ali=(ali[0]+self.string2[i-1],ali[1]+"-")
i=i-1
not_found=False
if diag==current_score and not_found:
ali=(ali[0]+self.string2[i-1],ali[1]+self.string1[y-1])
i=i-1
y=y-1
not_found=False
if left==current_score and not_found:
ali=(ali[0]+"-",ali[1]+self.string1[y-1])
y=y-1
if self.score_matrix[i][y]==0:
break
ali=(ali[1][::-1],ali[0][::-1])
print(ali)
print(self.trace)
return ali #("","")
def get_alignment_trace(self):
"""
trace IMPLEMENTATION
:return: alignment represented as a tuple of aligned strings
"""
max_score=np.amax(self.score_matrix)
for i in range (1,len(self.string2)+1):
for y in range (1,len(self.string1)+1):
if self.score_matrix[i][y]==max_score:
break
if max_score==0:
return("","")
print(max_score)
print(self.score_matrix)
print(self.trace)
print(i,y)
i=i-1
y=y-1
string1=""
string2=""
last=False
while True:
#if self.score_matrix[i][y]==0:
#diag
if self.trace[i][y]==1:
string1=string1+self.string1[y-1]
string2=string2+self.string2[i-1]
i=i-1
y=y-1
#left
if self.trace[i][y]==2:
string1=string1+self.string1[y-1]
string2=string2+"-"
y=y-1
#up
if self.trace[i][y]==3:
string1=string1+"-"
string2=string2+self.string2[i-1]
i=i-1
if last and i<1:
string1=string1+self.string1[y-1]
string2=string2+"-"
if last and y<1:
string1=string1+"-"
string2=string2+self.string2[i-1]
if last and y>0 and i>0:
string1=string1+self.string1[y-1]
string2=string2+self.string2[i-1]
if last:
break
if self.trace[i][y]==0:
last=True
ali=(string1[::-1],string2[::-1])
#self.ali=("","")
#ali=("","")
#print(self.trace)
print(ali)
return ali
    def is_residue_aligned(self, string_number, residue_index):
        """
        :param string_number: number of the string (1 for string1, 2 for string2) to check
        :param residue_index: index of the residue to check
        :return: True if the residue with the given index in the given string
                 is part of the found local alignment, False otherwise
        """
        self.get_alignment()  # sets self.start_alignment and self.end_alignment
        if string_number == 1:
            return self.start_alignment[0] <= residue_index <= self.end_alignment[0]
        if string_number == 2:
            return self.start_alignment[1] <= residue_index < (self.end_alignment[1] - 1)
def is_residue_aligned__(self, string_number, residue_index):
(ali)=self.get_alignment()
print(self.start_alignment)
print(self.end_alignment)
check=(self.string1[residue_index:residue_index+2],self.string2[residue_index:residue_index+2])
print("checkstring",check)
strings=(self.string1,self.string2)
#string contains
if not (check[string_number-1] in strings[string_number-1]):
return False
print("check1")
#check if other string contains -
if string_number==1:
index=self.start_alignment[0]-residue_index
print(index)
print("res",ali[1][index:index+3])
if "-" in ali[1][index:index+3]:
return False
else:
index=self.start_alignment[1]+residue_index
print(index)
if "-" in ali[0][index:index+3]:
return False
print("CHECK2")
#index
if residue_index>=self.start_alignment[string_number-1] and (residue_index+2)<=self.end_alignment[string_number-1] :
return True
return True
def is_residue_aligned_old(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been alined
False otherwise
"""
ali1,ali2=self.get_alignment()
print(self.start_alignment)
print(self.end_alignment)
#print(ali1)
#print(ali2)
#check lengths
if len(self.string1)<(residue_index+2) or len(self.string2)<(residue_index+2):
return False
out=True
if string_number==1:
#check first
check=self.string1[residue_index:residue_index+2]
if check in ali1:
return True
if string_number==2:
#check first
check=self.string2[residue_index:residue_index+2]
if check in ali2:
return True
#print(out)
return True
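# Minimal usage sketch (illustrative only). The identity-style dict below is a
# stand-in for the substitution matrices supplied by the tests.
if __name__ == "__main__":
    _AAS = "ARNDCQEGHILKMFPSTWYV"
    _identity = {a: {b: (1 if a == b else -1) for b in _AAS} for a in _AAS}
    la = LocalAlignment("ARNDCEQGHI", "DDCEQHG", -6, _identity)
    print(la.has_alignment())
    print(la.get_alignment())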
<file_sep>##############
# Exercise 2.6
##############
import collections
import aa_codon
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def merge(self, dicts):
return sum(dicts, collections.Counter())
def get_average_length(self):
sum_av = 0
for i in self.__sequences:
sum_av += len(i)
av = sum_av / len(self.__sequences)
return av
def read_fasta(self, path):
self.__sequences = aa_codon.read_fasta(path)
#print(self.__sequences)
def get_abs_frequencies(self):
dicts = list()
for seq in self.__sequences:
dictionary_cnt = aa_codon.aa_dist(seq)
dicts.append(dictionary_cnt)
return dict(self.merge(dicts))
def get_av_frequencies(self):
res = {}
total_length = 0
abs_freq = self.get_abs_frequencies()
for key, value in abs_freq.items():
total_length += value
for key, value in abs_freq.items():
res[key] = value / total_length
return res
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
absolute_frequencies = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
av_frequencies = {
'A': 0,
'R': 0,
'N': 0,
'D': 0,
'C': 0,
'E': 0,
'Q': 0,
'G': 0,
'H': 0,
'I': 0,
'L': 0,
'K': 0,
'M': 0,
'F': 0,
'P': 0,
'S': 0,
'T': 0,
'W': 0,
'Y': 0,
'V': 0,
}
def __init__(self, filepath):
self.__sequences = self.get_sequences(filepath)
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
sequenceslengths=[len(x[1]) for x in self.__sequences]
return sum(sequenceslengths)/len(sequenceslengths)
def read_fasta(self, path):
pass
def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
totamin=""
for seq in self.__sequences:
totamin=totamin+seq[1]
for aminoacid in self.absolute_frequencies:
self.absolute_frequencies[aminoacid]=totamin.count(aminoacid)
return self.absolute_frequencies
def get_av_frequencies(self):
# return number of occurences normalized by length
totamin = ""
for seq in self.__sequences:
totamin = totamin + seq[1]
for aminoacid in self.av_frequencies:
self.av_frequencies[aminoacid] = totamin.count(aminoacid)/len(totamin)
return self.av_frequencies
def get_sequences(self, filepath):
fastafile = open(filepath, "r")
sequences=[]
sequence=[]
bigline=""
for line in fastafile:
if line == '\n':
bigline=bigline.replace('\n','')
bigline = bigline.replace('*', '')
bigline = bigline.replace('-', '')
sequence.append(bigline)
sequences.append(sequence)
sequence = []
bigline=""
else:
if '>' in line:
sequence.append(line)
else:
bigline=bigline+line
bigline = bigline.replace('\n', '')
bigline = bigline.replace('*', '')
bigline = bigline.replace('-', '')
sequence.append(bigline)
sequences.append(sequence)
return sequences
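# Minimal usage sketch; assumes a FASTA file is available at this path.
if __name__ == "__main__":
    dist = AADist("tests/tests.fasta")
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_av_frequencies())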
<file_sep>import itertools
from copy import deepcopy as copy
_filter = filter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
all_words = itertools.product(ALPHABET, repeat=3)
all_words = [''.join(a) for a in list(all_words)]
def filter(*a, **kw):
return list(_filter(*a, **kw))
def avg(lst):
return sum(lst) / len(lst) if lst else 0
def simple_words(sequence):
ws = [sequence[i:i+3] for i in range(len(sequence)-2)]
return list(set(ws)) if isinstance(sequence, str) else ws
def score_for(matrix, w1, w2):
s = 0
for i in range(3):
s += matrix[AA_TO_INT[w1[i]]][AA_TO_INT[w2[i]]]
return s
def get_score(matrix, w1, w2, pos1=None, pos2=None):
if pos1 is not None:
w1 = w1[pos1:pos1+3]
w2 = w2[pos2:pos2+3]
if isinstance(w2, str):
return score_for(matrix, w1, w2)
s = 0
for i in range(3):
j = AA_TO_INT[w1[i]]
s += w2[i][j]
return s
def single_score_for(matrix, w1, w2):
s = matrix[AA_TO_INT[w1]][AA_TO_INT[w2]]
return s
def get_single_score(matrix, w1, w2, pos1=None, pos2=None):
if pos1 is not None:
w1 = w1[pos1]
w2 = w2[pos2]
if isinstance(w2, str):
return single_score_for(matrix, w1, w2)
j = AA_TO_INT[w1]
s = w2[j]
return s
def get_positions(matrix, w, sequence, pssm, T):
positions = []
sequence = sequence if sequence else pssm
for i in range(len(sequence)-2):
if get_score(matrix, w, sequence[i:i+3]) >= T:
positions.append(i)
return positions
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.words = []
self.n_sequences_for_word = {}
self.sequences_for_word = {}
self.num_words = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
ws = simple_words(sequence)
for w in ws:
self.n_sequences_for_word[w] = self.n_sequences_for_word.get(
w, 0) + 1
self.sequences_for_word[w] = self.sequences_for_word.get(
w, [])
self.sequences_for_word[w].append(sequence)
self.num_words.append(len(ws))
self.words.extend(ws)
self.words = list(set(self.words))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return self.sequences_for_word.get(word, [])
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
v = [n for _, n in self.n_sequences_for_word.items()]
return (len(self.sequences), len(self.words), round(avg(self.num_words)), round(avg(v)))
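# Minimal usage sketch with made-up sequences (illustration only):
if __name__ == "__main__":
    _db = BlastDb()
    _db.add_sequence("MGPRARPAFLLLMLLQTAVL")
    _db.add_sequence("MSPRARPAFLLLMLLAQTAVL")
    print(_db.get_sequences("RAR"))
    print(_db.get_db_stats())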
def indexes(word, sub):
words = []
start = 0
while True:
found = word[start:].find(sub)
if found == -1:
return words
start += found
words.append(start)
start += 1
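# Example: indexes("ABCABC", "ABC") returns [0, 3]; overlapping occurrences
# are included, e.g. indexes("AAAA", "AA") returns [0, 1, 2].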
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.subst = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
self.pssm = None
to_test = copy(all_words)
res = list()
ws = simple_words(sequence if sequence else pssm)
for w in ws:
removals = list()
for word in to_test:
if get_score(self.subst, word, w) >= T:
res.append(word)
removals.append(word)
for r in removals:
to_test.remove(r)
return res
def search_one_hit(self, blast_db: BlastDb, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
ws = self.get_words(sequence=query, pssm=pssm, T=T)
q = query if query else pssm
l_q = len(q)
for w_q in ws:
for i_q in get_positions(self.subst, w_q, sequence=query, pssm=pssm, T=T):
for s in blast_db.get_sequences(w_q):
l_s = len(s)
for i_s in indexes(s, w_q):
highest_scoring = hsp = w_q
highest_score = score = get_score(
self.subst, hsp, q[i_q:i_q+3])
for direction in [1, -1]:
if direction == 1:
j_q = i_q + 3 # pointer query
j_s = i_s + 3 # pointer (target) sequence
else:
j_q = i_q - 1 # pointer query
j_s = i_s - 1 # pointer (target) sequence
while j_q >= 0 and j_s >= 0 and j_q < l_q and j_s < l_s:
delta = get_single_score(
self.subst, s, q, j_s, j_q)
if score + delta <= highest_score - X:
break
score += delta
if direction == 1:
hsp += s[j_s]
else:
hsp = s[j_s] + hsp
if score > highest_score:
highest_score = score
highest_scoring = hsp
j_q += direction
j_s += direction
score = highest_score
hsp = highest_scoring
if direction == 1:
right_len = len(hsp)
if score >= S:
res = (i_q - (len(hsp) - right_len), i_s -
(len(hsp) - right_len), len(hsp), score)
d[s] = d.get(s, [])
if res not in d[s]:
d[s].append(res)
return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
index = {}
ws = self.get_words(sequence=query, pssm=pssm, T=T)
q = query if query else pssm
l_q = len(q)
for w_q in ws:
for i_q in get_positions(self.subst, w_q, sequence=query, pssm=pssm, T=T):
for s in blast_db.get_sequences(w_q):
for i_s in indexes(s, w_q):
index[(s, i_q-i_s)] = index.get((s, i_q-i_s), [])
index[(s, i_q-i_s)].append((w_q, i_q, i_s))
for s_, ts in index.items():
s, distance = s_
            if len(ts) < 2:
                continue  # the two-hit method needs at least two hits on this diagonal
l_s = len(s)
ts.sort(key=lambda t: t[2])
leftmost = -1
            for i in range(1, len(ts)):
w_q, i_q, i_s = ts[i]
offset = 1
while i - offset >= 0:
prev_i_s = ts[i-offset][2]
                    if i_s - prev_i_s > A:
                        prev_i_s = -1
                        break  # hits too far apart for the two-hit window A
                    if i_s - prev_i_s >= 3:
                        break  # found a non-overlapping earlier hit
prev_i_s = -1
offset += 1
if prev_i_s < 0:
continue
if prev_i_s < leftmost:
continue
highest_scoring = hsp = w_q
highest_score = score = get_score(
self.subst, hsp, q[i_q:i_q+3])
did_bridge = False
for direction in [-1, 1]:
if direction == 1:
j_q = i_q + 3 # pointer query
j_s = i_s + 3 # pointer (target) sequence
else:
j_q = i_q - 1 # pointer query
j_s = i_s - 1 # pointer (target) sequence
offset = 0
while j_q >= 0 and j_s >= 0 and j_q < l_q and j_s < l_s:
delta = get_single_score(
self.subst, s, q, j_s, j_q)
if score + delta <= highest_score - X:
                                offset += 1
break
score += delta
if direction == 1:
hsp += s[j_s]
else:
hsp = s[j_s] + hsp
if score > highest_score:
highest_score = score
highest_scoring = hsp
j_q += direction
j_s += direction
score = highest_score
hsp = highest_scoring
if direction == -1:
left_len = len(hsp)
if j_s + offset <= prev_i_s + 2:
did_bridge = True
else:
# assert False, "breakpoint"
break
if did_bridge:
leftmost = i_s - (left_len - 3) + len(hsp)
if score >= S:
res = (i_q - (left_len - 3), i_s -
(left_len - 3), len(hsp), score)
d[s] = d.get(s, [])
if res not in d[s]:
d[s].append(res)
return d<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=np.int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return 4
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return 43
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return [
('ADMI-NS', 'ADMIRES'), ('ADMIN-S', 'ADMIRES')
]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return [
[0, -1, -2, -3, -4, -5, -6],
[-1, 1, 0, -1, -2, -3, -4],
[-2, 0, 2, 1, 0, -1, -2],
[-3, -1, 1, 3, 2, 1, 0],
[-4, -2, 0, 2, 4, 3, 2],
[-5, -3, -1, 1, 3, 4, 3],
[-6, -4, -2, 0, 2, 3, 4],
[-7, -5, -3, -1, 1, 2, 4]
]
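# The align() stub above is left unimplemented. As a rough illustration of the
# Needleman-Wunsch fill step (a sketch, not the graded solution), with D as
# the score matrix and S the substitution scores:
#     D[i][j] = max(D[i-1][j-1] + S(string2[i-1], string1[j-1]),
#                   D[i-1][j] + gap_penalty,
#                   D[i][j-1] + gap_penalty)
def _nw_fill_sketch(string1, string2, gap_penalty, matrix):
    # string2 indexes rows, string1 indexes columns, as noted in __init__.
    rows, cols = len(string2) + 1, len(string1) + 1
    d = np.zeros((rows, cols), dtype=int)
    for i in range(1, rows):
        d[i][0] = i * gap_penalty
    for j in range(1, cols):
        d[0][j] = j * gap_penalty
    for i in range(1, rows):
        for j in range(1, cols):
            sub = matrix[string2[i - 1]][string1[j - 1]]
            d[i][j] = max(d[i - 1][j - 1] + sub,
                          d[i - 1][j] + gap_penalty,
                          d[i][j - 1] + gap_penalty)
    return d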
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
import re
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def add_sequence(self, seq):
self.__sequences.append(seq)
def get_average_length(self):
total = 0
for seq in self.__sequences:
total += len(seq)
return float(total) / float(self.get_counts())
def aa_dist(self, aa_seq):
counted = Counter(aa_seq)
for key in counted:
counted[key] /= len(aa_seq)
return counted
def read_fasta(self, filename):
with open(filename, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.add_sequence(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += re.sub("[^ARNDCQEGHILKMFPSTWYV]+", '', line)
if len(seq) > 0:
self.add_sequence(seq)
def get_abs_frequencies(self):
a = {c:0 for c in "ARNDCQEGHILKMFPSTWYV"}
for seq in self.__sequences:
for c in seq:
a[c] = a[c]+1
return a
def get_av_frequencies(self):
total_length = 0
a = {c:0 for c in "ARNDCQEGHILKMFPSTWYV"}
for seq in self.__sequences:
total_length += len(seq)
for c in seq:
a[c] = a[c]+1
b = {key : value/total_length for (key, value) in a.items()}
return b<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# score matrix has len(string2)+1 rows, len(string1)+1 cols
row, col = self.score_matrix.shape
self.score_matrix[0,0] = 0
        for i in range(1, row):
            self.score_matrix[i, 0] = self.score_matrix[i - 1, 0] + self.gap_penalty
        for j in range(1, col):
            self.score_matrix[0, j] = self.score_matrix[0, j - 1] + self.gap_penalty
for i in range(1,row,1): # string2
for j in range(1,col,1): #string1
s1 = self.string1[j-1]
s2 = self.string2[i-1]
score = self.substituion_matrix[s1][s2]
candidate = np.asarray([self.score_matrix[i,j-1]+self.gap_penalty,self.score_matrix[i-1,j]+self.gap_penalty,self.score_matrix[i-1,j-1]+score])
self.score_matrix[i,j] = np.max(candidate)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
# return np.max(self.score_matrix)
return self.score_matrix[-1,-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
result = self.get_alignments()
return len(result)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
result = []
def trace_back(i,j,align1,align2):
# 1,append current to align12
# stop point append the align into result list to deal with split situation in recursion
if i == 0 and j == 0:
result.append((align1,align2))
else:
current_score = self.score_matrix[i, j]
# 2,if current == diag
s1 = self.string1[j - 1]
s2 = self.string2[i - 1]
score = self.substituion_matrix[s1][s2]
if current_score == self.score_matrix[i - 1, j - 1] + score:
# print(s1 + align1)
trace_back(i - 1, j - 1, s1 + align1, s2 + align2)
                # if current came from the cell above (gap in string1)
                if current_score == self.score_matrix[i - 1, j] + self.gap_penalty:
                    s2 = self.string2[i - 1]
                    trace_back(i - 1, j, '-' + align1, s2 + align2)
                # if current came from the cell to the left (gap in string2)
                if current_score == self.score_matrix[i, j - 1] + self.gap_penalty:
                    s1 = self.string1[j - 1]
                    trace_back(i, j - 1, s1 + align1, '-' + align2)
row, col = self.score_matrix.shape
trace_back(row-1,col-1,"","")
return result
# return [
# ('ADMI-NS', 'ADMIRES'), ('ADMIN-S', 'ADMIRES')
# ]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
# return [
# [0, -1, -2, -3, -4, -5, -6],
# [-1, 1, 0, -1, -2, -3, -4],
# [-2, 0, 2, 1, 0, -1, -2],
# [-3, -1, 1, 3, 2, 1, 0],
# [-4, -2, 0, 2, 4, 3, 2],
# [-5, -3, -1, 1, 3, 4, 3],
# [-6, -4, -2, 0, 2, 3, 4],
# [-7, -5, -3, -1, 1, 2, 4]
# ]
if __name__ == '__main__':
from tests.matrices import MATRICES
import json
with open("/Users/wangyu/Documents/Protein_prediction/exercise/pp1ss19exercise3-exercise-ge56sen/tests/global_test.json") as json_file:
json_data = json.load(json_file)
# print(json_data)
# g = GlobalAlignment("CRYVPST", "WYVPSAT",-1,MATRICES["identity"])
# g.align()
# g = GlobalAlignment("SEQWENCE", "SEQWENCE",-6,MATRICES["blosum"])
# g.align()
# print(g.score_matrix)
# a = np.where(g.score_matrix!= np.array(json_data['mismatching']['score_matrix']))
# print(a)
# "large": {
# "strings": ["AVNCCEGQHI", "ARNDEQ"],
# "gap_penalty": -1,
# "matrix": "identity",
# "best_score": 0,
# "number_of_alignments": 2,
# "alignments": [["AVNCCEGQHI", "ARN-DE-Q--"], ["AVNCCEGQHI", "ARND-E-Q--"]]
# }
# g = GlobalAlignment("AVNCCEGQHI","ARNDEQ",-1,MATRICES["identity"])
# b = g.get_best_score()
# print(b)
# print(g.score_matrix)
# a = g.get_alignments()
# print(a)
# b = g.get_number_of_alignments()
# print(b)
# "small": {
# "strings": ["SCYTHE", "SCTHE"],
# "gap_penalty": -6,
# "matrix": "blosum",
# "best_score": 25,
# "number_of_alignments": 1,
# "alignments": [["SCYTHE", "SC-THE"]]
# },
g = GlobalAlignment("SCYTHE","SCTHE",-6,MATRICES["blosum"])
b = g.get_best_score()
print(b)
b = g.get_alignments()
print(b)
b = g.get_score_matrix()
print(b)
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return aa in ["R", "K", "H"]
def isNegativelyCharged(aa):
    return aa in ["E", "D"]
def isHydrophobic(aa):
    return aa in ["V", "I", "A", "L", "M", "F", "Y", "W"]
def isAromatic(aa):
    return aa in ["F", "W", "Y", "H"]
def isPolar(aa):
    return aa in ["R", "N", "D", "Q", "E", "H", "K", "S", "T", "Y"]
def isProline(aa):
    return aa == "P"
def containsSulfur(aa):
    return aa in ["M", "C"]
def isAcid(aa):
    return aa in ["D", "E"]
def isBasic(aa):
    return aa in ["R", "H", "K"]<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
codon_dict = {
# Decode DNA triplets to codons
'TTT': 'F',
'TTC': 'F',
'TTA': 'L',
'TTG': 'L',
'TCT': 'S',
'TCC': 'S',
'TCA': 'S',
'TCG': 'S',
'TAT': 'Y',
'TAC': 'Y',
'TAA': '*',
'TAG': '*',
'TGT': 'C',
'TGC': 'C',
'TGA': '*',
'TGG': 'W',
'CTT': 'L',
'CTC': 'L',
'CTA': 'L',
'CTG': 'L',
'CCT': 'P',
'CCC': 'P',
'CCA': 'P',
'CCG': 'P',
'CAT': 'H',
'CAC': 'H',
'CAA': 'Q',
'CAG': 'Q',
'CGT': 'R',
'CGC': 'R',
'CGA': 'R',
'CGG': 'R',
'ATT': 'I',
'ATC': 'I',
'ATA': 'I',
'ATG': 'M',
'ACT': 'T',
'ACC': 'T',
'ACA': 'T',
'ACG': 'T',
'AAT': 'N',
'AAC': 'N',
'AAA': 'K',
'AAG': 'K',
'AGT': 'S',
'AGC': 'S',
'AGA': 'R',
'AGG': 'R',
'GTT': 'V',
'GTC': 'V',
'GTA': 'V',
'GTG': 'V',
'GCT': 'A',
'GCC': 'A',
'GCA': 'A',
'GCG': 'A',
'GAT': 'D',
'GAC': 'D',
'GAA': 'E',
'GAG': 'E',
'GGT': 'G',
'GGC': 'G',
'GGA': 'G',
'GGG': 'G'
}
def complementary(sequence):
# Find complementary sequence
ret = sequence
ret = ret.replace('A', 'X')
ret = ret.replace('T', 'A')
ret = ret.replace('X', 'T')
ret = ret.replace('C', 'X')
ret = ret.replace('G', 'C')
ret = ret.replace('X', 'G')
return ret
def codons_to_aa(orf):
orf = orf.upper()
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i + 3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(codon_dict[c] for c in codons)
return aa_seq
def get_orfs(genome):
genome = genome.upper()
n = len(genome)
ret = []
# Check DNA for validity
for char in genome:
if char != 'A' and char != 'C' and char != 'T' and char != 'G':
raise TypeError('Invalid DNA sequence!')
# Primary strand
# Create circle: append to itself
circle = [genome, genome]
circle = ''.join(circle)
frames = get_frames(circle)
for i, frame in enumerate(frames):
start = True
hits = 0
tmp = [[]]
for j, char in enumerate(codons_to_aa(frame)):
if start:
if char == 'M':
tmp[hits].append(j * 3 + i)
start = False
else:
if char == '*':
start = True
tmp[hits].append(j * 3 + i)
tmp.append([])
hits += 1
for x in tmp:
if len(x) != 0 and len(x) != 1:
if len(codons_to_aa(frame[x[0]:x[1]])) >= 34:
if x[0] < n:
y = x[1]
if y + 2 > n:
y = x[1] - n
ret.append((x[0], y + 2, codons_to_aa(circle[x[0]:x[1]]), False))
# Reverse complementary strand
reverse_genome = complementary(genome)[::-1]
reverse_circle = [reverse_genome, reverse_genome]
reverse_circle = ''.join(reverse_circle)
frames = get_frames(reverse_circle)
for i, frame in enumerate(frames):
start = True
hits = 0
tmp = [[]]
for j, char in enumerate(codons_to_aa(frame)):
if start:
if char == 'M':
tmp[hits].append(j * 3 + i)
start = False
else:
if char == '*':
start = True
tmp[hits].append(j * 3 + i)
tmp.append([])
hits += 1
for x in tmp:
if len(x) != 0 and len(x) != 1:
if len(codons_to_aa(frame[x[0]:x[1]])) >= 34:
if x[0] < n:
y = x[1]
if x[1] + 2 > n:
y = x[1] - n
ret.append((abs(x[0] - n) - 1, abs(y + 2 - n) - 1,
codons_to_aa(reverse_circle[x[0]:x[1]]), True))
    for r in list(ret):  # iterate over a copy, since ret is mutated below
x = [d for d in ret if d[1] == r[1]]
if len(x) != 1:
if len(x[0][2]) > len(x[1][2]):
ret.remove(x[1])
else:
ret.remove(x[0])
x = [d for d in ret if (d[0] >= len(genome) or d[0] < 0) or (d[1] >= len(genome) or d[1] < 0)]
for d in x:
ret.remove(d)
return ret
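# Quick sanity check (illustrative values only):
if __name__ == "__main__":
    print(codons_to_aa("ATGTTTTAA"))         # -> "MF*"
    print(complementary("ATGTTTTAA")[::-1])  # reverse complementary strand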
def get_frames(sequence):
size = int(len(sequence) / 3) * 3
frame1 = sequence[:size]
frame2 = sequence[1:]
size = int(len(frame2) / 3) * 3
frame2 = frame2[:size]
frame3 = sequence[2:]
size = int(len(frame3) / 3) * 3
frame3 = frame3[:size]
return [frame1, frame2, frame3]<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
if len(sequences) < 1:
raise TypeError("Empty sequence list")
L = len(sequences[0])
for s in sequences:
if len(s) != L:
raise TypeError("Wrong length of a sequence")
for c in s:
if c not in ALPHABET:
raise TypeError("Wrong character in the string")
self.sequences = sequences
#Amino acid occurence in different columns of the MSA
#[AA][Column]
AAO = np.zeros((len(ALPHABET), len(sequences[0])))
for i in range(len(AAO)):
for j in range(len(AAO[i])):
for s in sequences:
if s[j] == ALPHABET[i]:
AAO[i][j] += 1
R = np.zeros(len(sequences[0]))
for i in range(len(sequences[0])):
R[i] = len([x for x in AAO[:, i] if x != 0])
self.R = R
self.AAO = AAO
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
S = self.sequences
primary_sequence = self.get_primary_sequence()
L = len(primary_sequence)
sequences_reduced = []
for s in S:
new_s_list = []
for i in range(len(S[0])):
if S[0][i] != "-":
new_s_list.append(s[i])
sequences_reduced.append("".join(new_s_list))
        weights = np.ones(len(self.sequences))  # one weight per sequence
if use_sequence_weights == True:
weights = self.get_sequence_weights()
## Calculation of background frequencies
        if bg_matrix is None:
bg_matrix = 0.0025 * np.ones((20, 20))
bg_vector = np.sum(bg_matrix, axis = 0)
## Calculation of f and redistribute_gaps
if redistribute_gaps == True:
alphabet_len = len(ALPHABET)
else:
alphabet_len = len(ALPHABET) - 1
f = np.zeros((L, alphabet_len))
for i in range(L):
for j in range(alphabet_len):
for s in range(len(sequences_reduced)):
if sequences_reduced[s][i] == ALPHABET[j]:
f[i][j] += weights[s]
if redistribute_gaps == True:
new_f = np.zeros((L, alphabet_len))
for i in range(L):
for j in range(len(ALPHABET[0:-1])):
new_f[i][j] = f[i][j] + f[i][-1]*bg_vector[j]
f = new_f
## Calculation of the pseudocount matrix
g = np.zeros((L, len(ALPHABET[0:-1])))
for i in range(L):
for a in range(len(ALPHABET[0:-1])):
for j in range(len(ALPHABET[0:-1])):
g[i][a] += (f[i][j]/bg_vector[j]) * bg_matrix[j][a]
for i in range(L):
for j in range(len(ALPHABET[0:-1])):
if add_pseudocounts == True:
alpha = self.get_number_of_observations() - 1
f[i][j] = (alpha * f[i][j] + beta * g[i][j]) / (alpha + beta)
f = [c / np.sum(c) for c in f]
pssm = np.zeros((L, 20))
for i in range(L):
for j in range(len(ALPHABET[0:-1])):
value = f[i][j]/bg_vector[j]
if value == 0:
pssm[i][j] = -20
else:
pssm[i][j] = round(2*np.log2(value))
return np.rint(pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (len(self.sequences), len(self.sequences[0]))
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace("-","")
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
S = self.sequences
L = len(self.sequences)
weights = np.zeros(L)
AAO = self.AAO
R = self.R
for s in range(L):
for i in range(len(S[0])):
sik = AAO[ALPHABET.index(S[s][i])][i]
if R[i] != 1:
weights[s] += 1/(R[i]*sik)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
        L = len(self.sequences[0])  # MSA length; all rows have equal length
R = self.R
num_obs = np.sum(R)/L
return num_obs.astype(np.float64)
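# Minimal usage sketch (made-up MSA, illustration only):
if __name__ == "__main__":
    msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
    print(msa.get_size())              # (4, 5)
    print(msa.get_primary_sequence())  # "SEAN"
    print(msa.get_sequence_weights())
    print(msa.get_pssm())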
<file_sep>##############
# Exercise 2.6
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
lengthofSequences = 0
for i in self.__sequences:
lengthofSequences += len(i)
returnInt = lengthofSequences/self.get_counts()
return returnInt
##############
# Exercise 2.4
##############
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.__sequences.append(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
if "*" in seq:
seq = seq[:-1]
self.__sequences.append(seq)
def get_abs_frequencies(self):
aa_dict = {}
for i in self.__sequences:
for j in i:
if j in aa_dict:
aa_dict[j] = aa_dict.get(j) + 1
else:
aa_dict[j] = 1
        # return number of occurrences, not normalized by length
return aa_dict
def get_av_frequencies(self):
total_aa = self.get_counts() * self.get_average_length()
aa_dict = self.get_abs_frequencies()
for i, j in aa_dict.items():
avg_j = j / total_aa
aa_dict[i] = avg_j
        # return number of occurrences normalized by length
return aa_dict
<file_sep>import numpy as np
import re
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = sequences
self.sizeMSA = len(self.sequences) #should be more than 0
if self.sizeMSA < 1:
raise TypeError
self.lenSeq = len(sequences[0]) #should be equal among all sequences
for seq in sequences:
if len(seq) != self.lenSeq:
raise TypeError
for char in seq: #should consist of only valid characters
if AA_TO_INT.get(char) == None:
raise TypeError
self.ri = np.zeros(self.lenSeq)
for i in range(self.lenSeq):
ri_set = set()
for seq in self.sequences:
ri_set.add(seq[i])
self.ri[i] = len(ri_set)
s = np.zeros((self.lenSeq,self.sizeMSA))
for i in range(s.shape[0]):
for k in range(s.shape[1]):
aa = self.sequences[k][i]
count = 0
for seq in self.sequences:
if seq[i] == aa:
count += 1
s[i][k] = count
self.weights = np.zeros(self.sizeMSA)
for j in range(s.shape[1]):
w = 0
for i in range(s.shape[0]):
if self.ri[i] > 1:
w = w + (1/(self.ri[i]*s[i][j]))
self.weights[j] = w
self.independent_observations = np.sum(self.ri)/len(self.ri)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
aa_counts = np.zeros((self.lenSeq,len(ALPHABET)))
if use_sequence_weights == False: #don't use sequence weights
for aa_index in range(len(ALPHABET)):
for pos in range(self.lenSeq):
count = 0
for seq in self.sequences:
if seq[pos] == ALPHABET[aa_index]:
count = count + 1
aa_counts[pos,aa_index] = count
else: #use sequence weights
for aa_index in range(len(ALPHABET)):
for pos in range(self.lenSeq):
f = 0
for idx in range(self.sizeMSA):
if self.sequences[idx][pos] == ALPHABET[aa_index]:
f = f + self.weights[idx]
aa_counts[pos,aa_index] = f
bg_props = np.full(20,0.05)
bg_props_matrix = np.full((20,20),0.0025)
        if bg_matrix is not None:  # background prob matrix given
bg_props_matrix = bg_matrix
bg_props = np.sum(bg_matrix,axis=0)
redist_gaps = np.zeros((self.lenSeq,len(ALPHABET)-1))
if redistribute_gaps == True: #redistribute gaps
for i in range(redist_gaps.shape[0]):
num_gaps = aa_counts[i][-1]
for j in range(redist_gaps.shape[1]):
redist_gaps[i][j] = aa_counts[i][j] + (num_gaps*bg_props[j])
else: #don't distribute gaps
redist_gaps = aa_counts.copy()
redist_gaps = np.delete(redist_gaps,-1,axis=1)
pseudocounts = redist_gaps.copy()
if add_pseudocounts == True: #add pseudocounts
alpha = self.independent_observations - 1
for i in range(pseudocounts.shape[0]):
for a in range(pseudocounts.shape[1]):
p_count = 0
for j in range(redist_gaps.shape[1]):
p_count += redist_gaps[i][j]*bg_props_matrix[j][a]/bg_props[j]
pseudocounts[i][a] = ((alpha*pseudocounts[i][a])+(beta*p_count))/(alpha+beta)
#normalize
for i in range(pseudocounts.shape[0]):
row_sum = np.sum(pseudocounts[i])
for j in range(pseudocounts.shape[1]):
pseudocounts[i][j] /= row_sum
#divide by background probabilities
for i in range(pseudocounts.shape[0]):
for j in range(pseudocounts.shape[1]):
pseudocounts[i][j] /= bg_props[j]
#build PSSM with scores
nonzero_cols_primary_seq = []
for i in range(self.lenSeq):
if self.sequences[0][i] != "-":
nonzero_cols_primary_seq.append(i)
pssm = np.zeros((len(nonzero_cols_primary_seq),len(ALPHABET)-1))
for i in range(pssm.shape[0]):
for j in range(pssm.shape[1]): # score = -inf case
if pseudocounts[nonzero_cols_primary_seq[i]][j] == 0:
pssm[i][j] = -20
else:
pssm[i][j] = 2*np.log2(pseudocounts[nonzero_cols_primary_seq[i]][j])
pssm = np.rint(pssm)
return pssm.astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
return (self.sizeMSA, self.lenSeq)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return re.sub("-","",self.sequences[0])
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
return self.weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
return self.independent_observations.astype(np.float64)
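# Minimal usage sketch with refinements enabled (made-up MSA, illustration only):
if __name__ == "__main__":
    msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
    print(msa.get_number_of_observations())
    print(msa.get_pssm(use_sequence_weights=True, redistribute_gaps=True))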
<file_sep>import numpy as np
import json
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
self.databaseStructure = []
"""
Initialize the BlastDb class.
"""
pass
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.databaseStructure.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
sequenceContaining = []
for i in range(0, len(self.databaseStructure)):
if word in self.databaseStructure[i]:
sequenceContaining.append(self.databaseStructure[i])
return sequenceContaining
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
seqInDatabase = len(self.databaseStructure)
differentWords = set()
allWords = []
differentWordSets = [set() for i in range(seqInDatabase)]
for i in range(0, seqInDatabase):
for x in range(0, len(self.databaseStructure[i]) - 2):
word = self.databaseStructure[i][x:x+3]
differentWordSets[i].add(word)
differentWords.add(word)
allWords.append(word)
numberOfDifferentWords = len(differentWords)
averageNumberOfDifferentWordsPerSequence = 0
for i in range(0, seqInDatabase):
averageNumberOfDifferentWordsPerSequence += len(differentWordSets[i])
averageNumberOfDifferentWordsPerSequence = int(round(averageNumberOfDifferentWordsPerSequence / seqInDatabase))
wordsFoundInSequences = 0
for word in differentWords:
for i in range(0, seqInDatabase):
if word in self.databaseStructure[i]:
wordsFoundInSequences += 1
averageNumberOfSequencesContainingEachWord = 0
averageNumberOfSequencesContainingEachWord = int(round(wordsFoundInSequences/numberOfDifferentWords))
return tuple((seqInDatabase, numberOfDifferentWords, averageNumberOfDifferentWordsPerSequence, averageNumberOfSequencesContainingEachWord))
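# Illustrative helper (not used by the classes here): the score of a 3-letter
# word against a query word is the sum of per-position substitution scores,
# which is what get_words below compares against the threshold T.
def _word_score_sketch(word, query_word, substitution_matrix):
    return sum(substitution_matrix[AA_TO_INT[a]][AA_TO_INT[b]]
               for a, b in zip(word, query_word))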
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
alphabetLenComputed = len(ALPHABET)
uniqueWordList = []
if pssm is None:
maxValue = np.amax(self.substitution_matrix)
seqLen = len(sequence)
for i in range(0, seqLen - 2):
for x in range(0, alphabetLenComputed):
scoreX = self.substitution_matrix[x][AA_TO_INT[sequence[i]]]
for y in range(0, alphabetLenComputed):
if (scoreX + maxValue + maxValue) < T:
break
scoreY = self.substitution_matrix[y][AA_TO_INT[sequence[i + 1]]]
for z in range(0, alphabetLenComputed):
if (scoreX + scoreY + maxValue) < T:
break
scoreZ = self.substitution_matrix[z][AA_TO_INT[sequence[i + 2]]]
wordScore = scoreX + scoreY + scoreZ
if wordScore >= T:
word = ALPHABET[x] + ALPHABET[y] + ALPHABET[z]
uniqueWordList.append(word)
#print(word, i, sequence[i] + sequence[i + 1] + sequence[i + 2])
return set(uniqueWordList)
else:
pssmColLen = len(pssm)
maxValue = np.amax(pssm)
for i in range(0, pssmColLen - 2):
for x in range(0, alphabetLenComputed):
scoreX = pssm[i][x]
for y in range(0, alphabetLenComputed):
if (scoreX + maxValue + maxValue) < T:
break
scoreY = pssm[i + 1][y]
for z in range(0, alphabetLenComputed):
if (scoreX + scoreY + maxValue) < T:
break
scoreZ = pssm[i + 2][z]
wordScore = scoreX + scoreY + scoreZ
if wordScore >= T:
word = ALPHABET[x] + ALPHABET[y] + ALPHABET[z]
uniqueWordList.append(word)
return set(uniqueWordList)
def get_words_with_index(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
alphabetLenComputed = len(ALPHABET)
uniqueWordList = []
if pssm is None:
maxValue = np.amax(self.substitution_matrix)
seqLen = len(sequence)
for i in range(0, seqLen - 2):
for x in range(0, alphabetLenComputed):
scoreX = self.substitution_matrix[x][AA_TO_INT[sequence[i]]]
for y in range(0, alphabetLenComputed):
if (scoreX + maxValue + maxValue) < T:
break
scoreY = self.substitution_matrix[y][AA_TO_INT[sequence[i + 1]]]
for z in range(0, alphabetLenComputed):
if (scoreX + scoreY + maxValue) < T:
break
scoreZ = self.substitution_matrix[z][AA_TO_INT[sequence[i + 2]]]
wordScore = scoreX + scoreY + scoreZ
if wordScore >= T:
word = ALPHABET[x] + ALPHABET[y] + ALPHABET[z]
uniqueWordList.append((word, i))
return uniqueWordList
else:
pssmColLen = len(pssm)
maxValue = np.amax(pssm)
for i in range(0, pssmColLen - 2):
for x in range(0, alphabetLenComputed):
scoreX = pssm[i][x]
for y in range(0, alphabetLenComputed):
if (scoreX + maxValue + maxValue) < T:
break
scoreY = pssm[i + 1][y]
for z in range(0, alphabetLenComputed):
if (scoreX + scoreY + maxValue) < T:
break
scoreZ = pssm[i + 2][z]
wordScore = scoreX + scoreY + scoreZ
if wordScore >= T:
word = ALPHABET[x] + ALPHABET[y] + ALPHABET[z]
uniqueWordList.append((word, i))
return uniqueWordList
    def get_hsp_score(self, startPos, endPos, query, word):
        """Score query[startPos:endPos] against word using the substitution matrix."""
        score = 0
        for s in range(startPos, endPos):
            score += self.substitution_matrix[AA_TO_INT[query[s]]][AA_TO_INT[word[s - startPos]]]
        return score
    def get_hsp_for_string(self, string1, string2):
        """Score two equal-length strings position by position."""
        score = 0
        for s in range(0, len(string1)):
            score += self.substitution_matrix[AA_TO_INT[string1[s]]][AA_TO_INT[string2[s]]]
        return score
    def find_all(self, string, substring):
        """
        Return the start indices of all (possibly overlapping) occurrences of
        substring in string.
        :param string: string to search in.
        :param substring: substring to search for.
        :return: list of start indices.
        """
        length = len(substring)
        c = 0
        indexes = []
        while c < len(string):
            if string[c:c + length] == substring:
                indexes.append(c)
            c = c + 1
        return indexes
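    # Example (hypothetical input): find_all('ABABA', 'ABA') returns [0, 2],
    # i.e. overlapping occurrences are reported as well.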
def search_one_hit_pssm(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
wordsOfQuerySeq = self.get_words(sequence=query, pssm=pssm, T=T)
targetSequences = []
for word in wordsOfQuerySeq:
targetSequences += blast_db.get_sequences(word)
targetSequences = set(targetSequences)
wordsToStartInQuerySequence = self.get_words_with_index(sequence=query, T=T, pssm=pssm)
wordsToStartInQuerySequenceSize = len(wordsToStartInQuerySequence)
pssmColLen = len(pssm)
result = dict()
for targetSequence in targetSequences:
currentTargetSeq = targetSequence
hspAligns = []
wordPositionInQuery = 0
for wi in range(0, wordsToStartInQuerySequenceSize):
wordPositionInQuery = wordsToStartInQuerySequence[wi][1]
startWord = wordsToStartInQuerySequence[wi][0]
if startWord not in currentTargetSeq:
continue
offsetsOfWordsInTargetSequence = self.find_all(currentTargetSeq, startWord)
for targetSeqOffset in offsetsOfWordsInTargetSequence:
extendRightStart = targetSeqOffset + 2
extendLeftStart = targetSeqOffset
currentPos = extendRightStart
currentWord = startWord
maxScore = pssm[wordPositionInQuery][AA_TO_INT[currentWord[0]]] + pssm[wordPositionInQuery + 1][AA_TO_INT[currentWord[1]]] + pssm[wordPositionInQuery + 2][AA_TO_INT[currentWord[2]]]
newScore = maxScore
maxWordTargetInQuery = extendLeftStart
                maxSequence = currentWord # in case the right extension immediately breaks
#extend right
while (wordPositionInQuery + len(currentWord)) < pssmColLen and currentPos < len(currentTargetSeq) - 1:
currentWord += currentTargetSeq[currentPos + 1]
newScore += pssm[wordPositionInQuery + len(currentWord) - 1][AA_TO_INT[currentWord[len(currentWord) - 1]]]
if newScore > maxScore:
maxScore = newScore
maxSequence = currentWord
currentPos += 1
# Break if distance too high
if newScore <= maxScore - X:
break
currentPos = extendLeftStart # start Left
maxWordPositionInQuery = wordPositionInQuery # get query position
currentWord = maxSequence # reset sequence
leftExtension = 0
newScore = maxScore
# extend left
while wordPositionInQuery - leftExtension > 0 and currentPos > 0:
currentWord = currentTargetSeq[currentPos - 1] + currentWord
leftExtension += 1
newScore += pssm[wordPositionInQuery - leftExtension][AA_TO_INT[currentWord[0]]]
if newScore > maxScore:
maxScore = newScore
maxSequence = currentWord
maxWordPositionInQuery = wordPositionInQuery - leftExtension
maxWordTargetInQuery = currentPos - 1
currentPos -= 1
if newScore <= maxScore - X:
break
hspScore = int(round(maxScore))
if hspScore >= S:
hspAligns.append((currentTargetSeq, maxSequence, int(round(maxScore)), maxWordPositionInQuery, maxWordTargetInQuery))
hspAligns = set(hspAligns)
for hsp in hspAligns:
if currentTargetSeq not in result:
result[currentTargetSeq] = [(hsp[3], hsp[4], len(hsp[1]), hsp[2])]
else:
result[currentTargetSeq].append((hsp[3], hsp[4], len(hsp[1]), hsp[2]))
return result
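    # PSSM variant of the one-hit search: same control flow as search_one_hit
    # below, but every extension step is scored with the PSSM row of the query
    # position instead of a substitution-matrix lookup.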
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
if pssm is not None:
return self.search_one_hit_pssm(blast_db, pssm=pssm, T=T, X=X, S=S)
wordsOfQuerySeq = self.get_words(sequence=query, pssm=pssm, T=T)
targetSequences = []
for word in wordsOfQuerySeq:
targetSequences += blast_db.get_sequences(word)
targetSequences = set(targetSequences)
wordsToStartInQuerySequence = self.get_words_with_index(sequence=query, T=T, pssm=pssm)
wordsToStartInQuerySequenceSize = len(wordsToStartInQuerySequence)
result = dict()
for targetSequence in targetSequences:
currentTargetSeq = targetSequence
hspAligns = []
wordPositionInQuery = 0
for wi in range(0, wordsToStartInQuerySequenceSize):
wordPositionInQuery = wordsToStartInQuerySequence[wi][1]
startWord = wordsToStartInQuerySequence[wi][0]
if startWord not in currentTargetSeq:
continue
offsetsOfWordsInTargetSequence = self.find_all(currentTargetSeq, startWord)
for targetSeqOffset in offsetsOfWordsInTargetSequence:
extendRightStart = targetSeqOffset + 2
extendLeftStart = targetSeqOffset
currentPos = extendRightStart
currentWord = startWord
maxScore = self.get_hsp_score(wordPositionInQuery, wordPositionInQuery + len(currentWord), query, currentWord)
newScore = maxScore
maxWordTargetInQuery = extendLeftStart
                maxSequence = currentWord # in case the right extension immediately breaks
#extend right
while (wordPositionInQuery + len(currentWord)) < len(query) and currentPos < len(currentTargetSeq) - 1:
currentWord += currentTargetSeq[currentPos + 1]
newScore += self.substitution_matrix[AA_TO_INT[query[wordPositionInQuery + len(currentWord) - 1]]][AA_TO_INT[currentWord[len(currentWord) - 1]]]
# newScore = self.get_hsp_score(wordPositionInQuery, wordPositionInQuery + len(currentWord), query, currentWord)
if newScore > maxScore:
maxScore = newScore
maxSequence = currentWord
currentPos += 1
# Break if distance too high
if newScore <= maxScore - X:
break
currentPos = extendLeftStart # start Left
maxWordPositionInQuery = wordPositionInQuery # get query position
currentWord = maxSequence # reset sequence
leftExtension = 0
newScore = maxScore
# extend left
while wordPositionInQuery - leftExtension > 0 and currentPos > 0:
currentWord = currentTargetSeq[currentPos - 1] + currentWord
leftExtension += 1
newScore += self.substitution_matrix[AA_TO_INT[query[wordPositionInQuery - leftExtension]]][AA_TO_INT[currentWord[0]]]
if newScore > maxScore:
maxScore = newScore
maxSequence = currentWord
maxWordPositionInQuery = wordPositionInQuery - leftExtension
maxWordTargetInQuery = currentPos - 1
currentPos -= 1
if newScore <= maxScore - X:
break
hspScore = int(round(maxScore))
if hspScore >= S:
hspAligns.append((currentTargetSeq, maxSequence, int(round(maxScore)), maxWordPositionInQuery, maxWordTargetInQuery))
hspAligns = set(hspAligns)
for hsp in hspAligns:
if currentTargetSeq not in result:
result[currentTargetSeq] = [(hsp[3], hsp[4], len(hsp[1]), hsp[2])]
else:
result[currentTargetSeq].append((hsp[3], hsp[4], len(hsp[1]), hsp[2]))
return result
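    # Extension sketch for the one-hit method above: every word hit is first
    # extended to the right, then to the left, and an extension stops as soon
    # as the running score drops more than X below the best score seen so far
    # (X-drop criterion). HSPs scoring at least S are reported as
    # (query start, target start, length, score) tuples.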
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # Placeholder result; the two-hit extension itself is not implemented here.
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return isACertainType(aa, ['K', 'R', 'H'])
def isNegativelyCharged(aa):
return isACertainType(aa, ['D', 'E'])
def isHydrophobic(aa):
return isACertainType(aa, ['A', 'V', 'I', 'L', 'M', 'F', 'Y', 'W'])
def isAromatic(aa):
return isACertainType(aa, ['W','Y','F','H'])
def isPolar(aa):
return isACertainType(aa, ['R', 'N', 'D', 'E', 'Q', 'H', 'K', 'S', 'T', 'Y'])
def containsSulfur(aa):
return isACertainType(aa, ['C','M'])
def isAcid(aa):
return isACertainType(aa, ['D','E'])
def isProline(aa):
    return aa.upper() == 'P'
def isBasic(aa):
return isACertainType(aa, ['R','K','H'])
def isACertainType(aa, values):
    return aa.upper() in values
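# Example (illustrative): isCharged('k') -> True; lookups are case-insensitive
# because isACertainType() upper-cases its input.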
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
def get_average_length(self):
return sum(self.get_abs_frequencies().values()) / self.get_counts()
def read_fasta(self, filename):
with open(filename, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith('>') or line.startswith(';'):
if sequence_started:
self.__sequences.append(seq)
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(seq)
@staticmethod
def prune_result(dictionary):
if "*" in dictionary:
del dictionary["*"]
return dictionary
def get_abs_frequencies(self):
result = Counter({})
for seq in self.__sequences:
result += Counter(seq)
return self.prune_result(result)
def get_av_frequencies(self):
result = Counter({})
for seq in self.__sequences:
result += Counter(seq)
result = self.prune_result(result)
total = sum(result.values())
for key in result:
result[key] /= total
return result
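# Usage sketch (the FASTA path is illustrative):
#     dist = AADist('sequences.fasta')
#     dist.get_counts()            # number of sequences read
#     dist.get_av_frequencies()    # per-residue relative frequencies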
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1: str = string1
self.string2: str = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
        self.aligment_matrix = np.zeros((len(string2) + 1, len(string1) + 1, 3), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
# https://codereview.stackexchange.com/questions/210099/needleman-wunsch-grid-generation-in-python
self.aligment_matrix[0, 1:, 1] = np.full(len(self.string1), 2)
self.aligment_matrix[1:, 0, 2] = np.full(len(self.string2), 3)
self.score_matrix[0, 1:] = np.fromiter(
(self.gap_penalty + (i * self.gap_penalty) for i in range(len(self.string1))), dtype="int")
self.score_matrix[1:, 0] = np.fromiter(
(self.gap_penalty + (i * self.gap_penalty) for i in range(len(self.string2))), dtype="int")
for f in range(1, len(self.string1) + 1):
for s in range(1, len(self.string2) + 1):
score_list = [(self.score_matrix[s - 1][f - 1] + self.substituion_matrix.get(self.string1[f - 1])[
self.string2[s - 1]], 1),
(self.score_matrix[s - 1][f] + self.gap_penalty, 2),
(self.score_matrix[s][f - 1] + self.gap_penalty, 3)]
score_matrix_score, max_value = max(score_list, key=lambda x: x[0])
self.score_matrix[s, f] = score_matrix_score
t = [score_list[i][1] for i in range(len(score_list)) if score_list[i][0] == score_matrix_score]
for item in t:
self.aligment_matrix[s, f, item - 1] = item
return self.score_matrix
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[len(self.string2), len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
def find_ali(s, f, path):
newPath = False
if s == 0 and f == 0:
return path
if self.aligment_matrix[s, f, 0] == 1:
newPath = True
path.append((self.string2[s - 1], self.string1[f - 1]))
find_ali(s - 1, f - 1, path)
if self.aligment_matrix[s, f, 1] == 2:
if newPath:
path.append(("//","//"))
newPath = True
path.append((self.string2[s - 1], "-"))
find_ali(s - 1, f, path)
if self.aligment_matrix[s, f, 2] == 3:
if newPath:
path.append(("//", "//"))
path.append(("-", self.string1[f - 1]))
find_ali(s, f-1, path)
alligent_list = []
find_ali(len(self.string2), len(self.string1), alligent_list)
word1 = ""
word2 = ""
split_list = []
for s1,s2 in alligent_list:
if s1 == "//":
split_list.append((word2,word1))
word1 = ""
word2 = ""
else:
word1 += s1
word2 += s2
split_list.append((word2,word1))
for index, word in enumerate(split_list[1:]):
previous_word_s1 = split_list[index][0]
previous_word_s2 = split_list[index][1]
split_list[index+1] = (previous_word_s1[0:len(previous_word_s1)-len(word[0])]+word[0], previous_word_s2[0:len(previous_word_s2)-len(word[1])]+word[1])
final_list = list(map(lambda x: (x[0][::-1],x[1][::-1]), split_list))
return final_list
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
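# Usage sketch (the substitution matrix is assumed to be a dict of dicts,
# e.g. BLOSUM62):
#     ga = GlobalAlignment('SCYTHE', 'SCTHE', -6, blosum62)
#     ga.get_best_score()      # best global alignment score
#     ga.get_alignments()      # list of tuples of the two aligned strings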
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
complements = {
"T" : "A",
"A" : "T",
"G" : "C",
"C" : "G"
}
codon_dict = {
'ATG' : 'M',
'TGG' : 'W',
'TAT' : 'Y',
'TAC' : 'Y',
'TTT' : 'F',
'TTC' : 'F',
'TGT' : 'C',
'TGC' : 'C',
'AAT' : 'N',
'AAC' : 'N',
'GAT' : 'D',
'GAC' : 'D',
'CAA' : 'Q',
'CAG' : 'Q',
'GAA' : 'E',
'GAG' : 'E',
'CAT' : 'H',
'CAC' : 'H',
'AAA' : 'K',
'AAG' : 'K',
'ATT' : 'I',
'ATC' : 'I',
'ATA' : 'I',
'GGT' : 'G',
'GGC' : 'G',
'GGA' : 'G',
'GGG' : 'G',
'GCT' : 'A',
'GCC' : 'A',
'GCA' : 'A',
'GCG' : 'A',
'GTT' : 'V',
'GTC' : 'V',
'GTA' : 'V',
'GTG' : 'V',
'ACT' : 'T',
'ACC' : 'T',
'ACA' : 'T',
'ACG' : 'T',
'CCT' : 'P',
'CCC' : 'P',
'CCA' : 'P',
'CCG' : 'P',
'CTT' : 'L',
'CTC' : 'L',
'CTA' : 'L',
'CTG' : 'L',
'TTA' : 'L',
'TTG' : 'L',
'TCT' : 'S',
'TCC' : 'S',
'TCA' : 'S',
'TCG' : 'S',
'AGT' : 'S',
'AGC' : 'S',
'CGT' : 'R',
'CGC' : 'R',
'CGA' : 'R',
'CGG' : 'R',
'AGA' : 'R',
'AGG' : 'R',
'TAA' : 'STOP',
'TAG' : 'STOP',
'TGA' : 'STOP'
}
stops=['TAA', 'TAG', 'TGA']
starts=['ATG']
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa = [codon_dict[c] for c in codons]
return aa
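# Example: codons_to_aa('ATGTGG') returns ['M', 'W']; an input whose length is
# not a multiple of three returns None.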
def complementary(s):
result = ""
for x in s:
result = result + complements[x]
return result
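# Example: complementary('ATGC') returns 'TACG' (base-wise complement, not
# reversed).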
def get_orfs_helper(genome, shift, revComp):
tmp = (genome+genome)
if len(tmp)%3>0:
tmp = tmp[:-(len(tmp)%3)]
stop = [i for i in range(len(tmp)) if tmp[i:i+3] in stops and i%3==0]
start = [i for i in range(len(tmp)) if tmp[i:i+3] in starts and i%3==0]
results = []
tmpStart= -1
for i in range(len(tmp)):
if i in start:
if tmpStart>-1:
continue
tmpStart=i
if i in stop:
if tmpStart>-1:
if i-tmpStart>99:
aa = ''.join(codons_to_aa((genome+genome)[tmpStart:i]))
tmpStart = tmpStart + shift
tmpStop = i+2+shift
tmpStop = tmpStop%len(genome)
tmpStart = tmpStart%len(genome)
if revComp:
tmpStart = len(genome)-tmpStart-1
tmpStop = len(genome)-tmpStop-1
results.append((tmpStart, tmpStop, aa, revComp))
tmpStart=-1
return results
def get_orfs(genome):
genome = genome.upper()
if not (set(genome) <= set("ACGT")):
raise TypeError("no DNA")
genomeComp=complementary(genome)
genomeRevComp = ''.join(reversed(genomeComp))
resultsTmp=[]
for i in range(3):
resultsTmp=resultsTmp + get_orfs_helper(genomeRevComp[i:]+genomeRevComp[:i],i, True)
resultsTmp=resultsTmp + get_orfs_helper(genome[i:]+genome[:i],i, False)
resultsByStop = {}
for r in resultsTmp:
if r[1] not in resultsByStop:
resultsByStop[r[1]]=[]
resultsByStop[r[1]].append(r)
results=[]
for s in resultsByStop:
best= resultsByStop[s][0]
for r in resultsByStop[s]:
if abs(best[1]-best[0])<abs(r[1]-r[0]):
best=r
results.append(best)
return results
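# Usage sketch: get_orfs returns (start, stop, protein, on_reverse_strand)
# tuples, keeping only the longest ORF per stop codon. ORFs must span more
# than 99 nucleotides, i.e. code for at least 34 amino acids including the
# start methionine. Example shape (values illustrative):
#     get_orfs(genome)  # -> [(12, 341, 'MKT...', False), ...]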
<file_sep>##############
# Exercise 2.7
##############
sulfur_containing_aas = ['C', 'M']
aromatic_aas = ['H', 'F', 'W', 'Y']
aliphatic_aas = ['V', 'I', 'L', 'M'] # Non-aromatic
acid_negatively_charged_aas = ['D', 'E']
basic_positively_charged_aas = ['K', 'R', 'H']
polar_aas = ['R', 'N', 'D', 'Q', 'E', 'H', 'K', 'S', 'T', 'Y']
hydrophobic_aas = ['A', 'V', 'I', 'L', 'F', 'W', 'Y', 'M'] #['A', 'G', 'V', 'I', 'L', 'F', 'P', 'M']
hydrophilic_aas = ['S', 'T', 'H', 'N', 'Q', 'E', 'D', 'K', 'R']
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in basic_positively_charged_aas
def isNegativelyCharged(aa):
return aa in acid_negatively_charged_aas
def isHydrophobic(aa):
return aa in hydrophobic_aas
def isHidrophilic(aa):
return aa in hydrophilic_aas
def isAromatic(aa):
return aa in aromatic_aas
def isPolar(aa):
return aa in polar_aas
def containsSulfur(aa):
return aa in sulfur_containing_aas
def isAcid(aa):
return aa in acid_negatively_charged_aas
def isBasic(aa):
return aa in basic_positively_charged_aas
def isProline(aa):
return aa == 'P'<file_sep>
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
lst = []
ABS = {}
NORM = {}
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.lst)
def get_average_length(self):
s = 0
for i in self.lst:
s += len(i)
return s / len(self.lst)
def read_fasta(self, filename):
res = []
f = open(filename, 'r')
seq = ''
for line in f:
if line.startswith('>') or line.startswith(';'):
if seq.endswith('*'):
seq = seq[:-1]
res.append(seq)
seq = ''
continue
seq += line.strip()
res.append(seq)
res.pop(0)
self.lst = res
    def get_abs_frequencies(self):
        # return number of occurrences, not normalized by length
        s = ''.join(self.lst)
        for aa in 'FLIMVSPTAYHQNKDECWRG':
            self.ABS[aa] = s.count(aa)
        return self.ABS
    def get_av_frequencies(self):
        # return occurrences normalized by the total sequence length
        s = ''.join(self.lst)
        for aa in 'FLIMVSPTAYHQNKDECWRG':
            self.NORM[aa] = s.count(aa) / len(s)
        return self.NORM
<file_sep>import re
import json
from pathlib import Path
from operator import itemgetter
from collections import namedtuple
# Read genome from file (may ONLY contain the sequence)
def read_genome(file):
genome = ''
with Path(file).open('r') as genome_file:
for line in genome_file:
genome += ''.join(line.split())
return genome
# Check if genome contains only legal characters
def check_genome(genome):
# Genome may only contain the four basic nucleotides (upper- and lowercase)
if re.fullmatch('[ACGTacgt]+', genome):
return True
else:
return False
# Create the reverse-complementary strand
def reverse_complementary(genome):
original = 'ACGTacgt'
complement = 'TGCAtgca'
trans_table = str.maketrans(original, complement)
genome = genome.translate(trans_table) # Translate nucleotides
genome = genome[::-1] # Reverse sequence
return genome
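# Example: reverse_complementary('ATGC') returns 'GCAT' (complement each base,
# then reverse the sequence).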
# Find all possible ORFs within genome
def find_orfs(genome, is_reverse_strand):
# Compile ORF pattern.
# Start codons: ATG
# Stop codons: TAA, TAG, TGA
# Minimum protein length: start + 33 = 34 codons/amino acids
pattern = re.compile('(ATG)((?:(?!TA[AG]|TGA)[ACGT]{3}){33,})(TA[AG]|TGA)')
# Tuple to define/save ORFs
Orf = namedtuple('ORF', ['start', 'stop', 'seq', 'is_reverse_strand'])
orfs = {} # Dictionary of ORFs
length = len(genome) # Length of genome
genome = genome.upper() # Convert to uppercase for pattern matching
genome = genome * 2 # Duplicate genome for circular ORFs
# Variables to navigate through genome
current_start = 0
current_stop = 0
last_frame_stop = [0, 0, 0]
# Reusable variables to define ORFs
orf_start = 0
orf_stop = 0
orf_seq = ''
while True:
match = pattern.search(genome, current_start)
if not match:
break
current_start = match.start() + 1
current_stop = match.end()
# Stop after one full loop around the genome
if current_start > length:
break
# Prevent ORFs from spanning more than one full cycle
if current_stop - current_start >= length:
continue
# Prevent overlapping ORFs with the same stop codon
if last_frame_stop[current_stop % 3] == current_stop:
continue
else:
last_frame_stop[current_stop % 3] = current_stop
# ORF sequence (including stop codon)
orf_seq = match.group()
# ORFs on the reverse strand are indexed on the primary strand
if is_reverse_strand:
orf_start = length - current_start + 1
orf_stop = length - ((current_stop - 1) % length)
else:
orf_start = current_start
orf_stop = ((current_stop - 1) % length) + 1
# Circular ORFs might share stop codons with some of the first ORFs
# Overwrite previous ORFs (circular ORFs are by definition longer)
# Subtract 1 from start/stop to get indices starting at 0
orfs[current_stop % length] = Orf(orf_start - 1,
orf_stop - 1,
codons_to_aa(orf_seq),
is_reverse_strand)
# Convert to list and sort by start position
orfs_list = list(orfs.values())
orfs_list.sort(key=itemgetter(0))
return orfs_list
def get_orfs(genome):
# Check genome first, then find ORFs on primary and reverse strand
if check_genome(genome):
orfs_list = find_orfs(genome, False)
orfs_list.extend(find_orfs(reverse_complementary(genome), True))
return orfs_list
else:
raise TypeError
# Translate codons to amino acids
codon_dict = {
'TTA': 'L', 'TTG': 'L', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'AGT': 'S', 'AGC': 'S',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I',
'TTT': 'F', 'TTC': 'F',
'TAT': 'Y', 'TAC': 'Y',
'CAT': 'H', 'CAC': 'H',
'CAA': 'Q', 'CAG': 'Q',
'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K',
'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E',
'TGT': 'C', 'TGC': 'C',
'ATG': 'M',
'TGG': 'W',
'TAA': '!', 'TAG': '!', 'TGA': '!' # Stop codons
}
def codons_to_aa(orf):
    # Check if the length is a multiple of three
if len(orf) % 3 != 0:
return None
# Conversion to codons
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
# Translate into amino acid sequence
aa_seq = ''.join(codon_dict[c] for c in codons)
# Check for correct start and stop codons
if not aa_seq.startswith('M') or not aa_seq.endswith('!'):
return None
# Check for intermediate stop codons
if aa_seq.count('!') != 1:
return None
# Remove stop codon symbol and return aa_seq
return aa_seq[:-1]
def main():
genome = read_genome('genome.txt')
orfs = get_orfs(genome)
for n, orf in enumerate(orfs):
print(n, orf, sep='\t')
json_data = dict()
json_data['genome'] = genome
json_data['orf_list'] = orfs
json_data['invalid_genome'] = 'ACGTANDSIMILAR'
# json_data['invalid_genome'] = 'NOTAGENOME'
# json_data['invalid_genome'] = 'WHATISTHIS'
with Path('orffinder_test.json').open('w') as json_file:
json.dump(json_data, json_file, indent=4)
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
CODONTABLE={
"TTT": "F","TTC": "F","TTA": "L","TTG": "L",
"TCT": "S","TCC": "S","TCA": "S","TCG": "S",
"TAT": "Y","TAC": "Y","TAA": "Stop","TAG": "Stop",
"TGT": "C","TGC": "C","TGA": "Stop","TGG": "W",
"CTT": "L","CTC": "L","CTA": "L","CTG": "L",
"CCT": "P","CCC": "P","CCA": "P","CCG": "P",
"CAT": "H","CAC": "H","CAA": "Q","CAG": "Q",
"CGT": "R","CGC": "R","CGA": "R","CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I", "ATG": "M",#METHIONINE M IS START ALSO
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N", "AAA": "K", "AAG": "K",
"AGT": "S", "AGC": "S", "AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D", "GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
}
NUCLEOTIDES = {
"A": "T",
"T": "A",
"G": "C",
"C": "G"
}
def complementary(somenucletodides):
return "".join([NUCLEOTIDES[nuc] for nuc in somenucletodides])
def getCodons(genome, offset, complementaryReverse=False):#length of every line=61
codons = []
codon = ""
if complementaryReverse==True:
genome = reversed(genome)
genome = complementary(genome)
offsetnucleotides=""
for nucleotide in genome[offset:]:
codon = codon + nucleotide
if len(codon) == 3:
codons.append(codon)
codon = ""
if len(codon)!=3:
offsetnucleotides+=codon
offsetnucleotides+=genome[0:offset]
return codons,offsetnucleotides
def getNucleotidelist(genome, offset, complementaryReverse=False):
nucleotidelist=""
offsetnucleotides = ""
if complementaryReverse==True:
genome = reversed(genome)
genome = complementary(genome)
for i in range(offset,len(genome[offset:]),3):
if len(genome[i-1:len(genome)-1])<3 and i>0:
while i<=len(genome)-1:
offsetnucleotides+=genome[i]
i+=1
else:
nucleotidelist+=genome[i:i+3]
i=0
while i<offset:
offsetnucleotides+=genome[i]
i+=1
return nucleotidelist,offsetnucleotides
def getFrames(genome):
frames=[]
for i in range(3):
codons,offsetnucleotides=getCodons(genome,i,complementaryReverse=False)
frames.append([[codons,False],i])
codons,offsetnucleotides = getCodons(genome, i, complementaryReverse=True)
frames.append([[codons,True],i])
return frames
def aminoacidsequencefromorf(orf):
seq=""
for codon in orf[0:len(orf)]:
seq=seq+CODONTABLE[codon]
return seq
def getorffromnucleotidelist_offsetnucleotids(nucleotidelist, offsetnucleotides, cr,offset):
orfes=[]
codon=""
orf=[]
hasStarted=False
hasStopped=True
for i in range(0,len(nucleotidelist)-2,3):
codon=nucleotidelist[i:i+3]
if codon=="ATG" and hasStarted==False and hasStopped==True:
hasStarted=True
hasStopped=False
orf=[]
if cr==False:
startindex=i+offset
else:
fulllastindex=len(nucleotidelist)-1+len(offsetnucleotides)
startindex=fulllastindex-i-offset
if codon in ["TAA","TAG","TGA"] and hasStarted==True and hasStopped==False:
hasStarted=False
hasStopped=True
if cr==False:
stopindex = i + offset + 2
else:
fulllastindex = len(nucleotidelist) - 1 + len(offsetnucleotides)
stopindex = fulllastindex - i-2-offset
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), cr)
if (len(orf[2]) >= 34):
orfes.append(orf)
else:
orf=[]
if hasStarted==True:
orf.append(codon)
if hasStarted==True and hasStopped==False:
orf.pop()
lastpart=nucleotidelist[i:]+offsetnucleotides
fulllistshifted=lastpart+nucleotidelist[0:startindex]
for j in range(0,len(fulllistshifted)-2,3):
codon = fulllistshifted[j:j + 3]
if codon in ["TAA", "TAG", "TGA"] and hasStarted == True and hasStopped == False:
hasStarted = False
hasStopped = True
if cr == False:
stopindex = j - len(lastpart) + 2
else:
fulllastindex = len(nucleotidelist) - 1 + len(offsetnucleotides)
stopindex = fulllastindex -len(lastpart)- j - 2 - offset
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), cr)
if (len(orf[2]) >= 34):
orfes.append(orf)
else:
orf = []
if hasStarted == True:
orf.append(codon)
return orfes
def getOrfessingleframePrimary(offset, genome):
j=0
orfes = []
hasStarted = False
hasStopped = True
readingindex=offset
while j<len(genome)*4:
if readingindex+3>len(genome):
beforetheedge = len(genome)-readingindex
beyondtheedge=3-beforetheedge
readingindex=beyondtheedge
codon = genome[len(genome)-beforetheedge:len(genome)]+genome[0:beyondtheedge]
else:
codon = genome[readingindex:readingindex+3]
readingindex+=3
if codon=="ATG" and hasStarted==False and hasStopped==True:
hasStarted=True
hasStopped=False
orf=[]
startindex=readingindex-3
if hasStarted == True and hasStopped == False and codon not in ["TAA","TAG","TGA"]:
orf.append(codon)
if codon in ["TAA","TAG","TGA"] and hasStarted==True and hasStopped==False:
hasStarted=False
hasStopped=True
stopindex = readingindex-3+ 2
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), False)
if (len(orf[2]) >= 34):
orfes.append(orf)
j+=3
return orfes
def getOrfessingleframeCompRev(offset,genome):
j=len(genome)-1
orfes = []
hasStarted = False
hasStopped = True
readingindex=len(genome)-offset
while j>-len(genome)*3:
if readingindex-3<0:
beforetheedge = -(readingindex-3)
beyondtheedge = 3-beforetheedge
readingindex=len(genome)-beyondtheedge+3
codonaft=genome[len(genome)-beyondtheedge:len(genome)]
codonbef= genome[0:beforetheedge]
codon=codonaft+codonbef
else:
codon = genome[readingindex-3:readingindex]
codon = reversed(codon)
codon = complementary(codon)
if codon=="ATG" and hasStarted==False and hasStopped==True:
hasStarted=True
hasStopped=False
orf=[]
startindex=readingindex-1
if hasStarted == True and hasStopped == False and codon not in ["TAA","TAG","TGA"]:
orf.append(codon)
if codon in ["TAA","TAG","TGA"] and hasStarted==True and hasStopped==False:
hasStarted=False
hasStopped=True
stopindex = readingindex-1- 2
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), True)
if (len(orf[2]) >= 34):
orfes.append(orf)
j-=3
readingindex-=3
return orfes
def getOrfessingleframe(i, cr,genome):
if cr==False:
orflist=getOrfessingleframePrimary(i, genome)
if cr==True:
orflist=getOrfessingleframeCompRev(i, genome)
return orflist
def removesameorfes(orfes):
realorfes=[]
removed=[]
for orf in orfes:
if orf not in realorfes:
realorfes.append(orf)
for orf in realorfes:
for orf2 in realorfes:
if orf[1]==orf2[1]:
if len(orf[2])>len(orf2[2]):
if orf2 not in removed:
removed.append(orf2)
if len(orf[2])<len(orf2[2]):
if orf not in removed:
removed.append(orf)
for rem in removed:
realorfes.remove(rem)
return realorfes
def getOrfessingleframePrimarydoublestring(offset, genome):
reallength = len(genome)
genome=genome+genome
orfes = []
hasStarted = False
hasStopped = True
for i in range(offset,len(genome),3):
if i+3>=len(genome):
break
codon = genome[i:i + 3]
if codon == "ATG" and hasStarted == False and hasStopped == True:
hasStarted = True
hasStopped = False
orf = []
if(i>reallength-1):
startindex=i-reallength
else:
startindex = i
if hasStarted == True and hasStopped == False and codon not in ["TAA", "TAG", "TGA"]:
orf.append(codon)
if codon in ["TAA", "TAG", "TGA"] and hasStarted == True and hasStopped == False:
hasStarted = False
hasStopped = True
if(i+2>reallength-1):
stopindex = i -reallength+ 2
else:
stopindex = i + 2
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), False)
if len(orf[2]) >= 34:
orfes.append(orf)
return orfes
def getOrfessingleframeCompRevdoublestring(offset, genome):
reallength = len(genome)
genome = genome + genome
genome = reversed(genome)
genome = complementary(genome)
orfes = []
hasStarted = False
hasStopped = True
for i in range(offset,len(genome), 3):
if i+3>=len(genome):
break
codon = genome[i:i+3]
if codon == "ATG" and hasStarted == False and hasStopped == True:
hasStarted = True
hasStopped = False
orf = []
if(i>reallength-1):
startindex = i - reallength
startindex = reallength - 1 - startindex
else:
startindex = reallength - 1-i
if hasStarted == True and hasStopped == False and codon not in ["TAA", "TAG", "TGA"]:
orf.append(codon)
if codon in ["TAA", "TAG", "TGA"] and hasStarted == True and hasStopped == False:
hasStarted = False
hasStopped = True
if(i+2>reallength-1):
stopindex = i - reallength
stopindex = reallength - 1 - stopindex-2
else:
stopindex=reallength-1-i-2
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), True)
if len(orf[2]) >= 34:
orfes.append(orf)
return orfes
def getOrfessingleframedoublestring(i, cr, genome):
if cr==False:
orflist=getOrfessingleframePrimarydoublestring(i, genome)
if cr==True:
orflist=getOrfessingleframeCompRevdoublestring(i, genome)
return orflist
def getOrfessingleframedoublestringfull(offset, genome,compReverse):
reallength = len(genome)
genome = genome + genome
if compReverse:
genome = reversed(genome)
genome = complementary(genome)
orfes = []
hasStarted = False
hasStopped = True
over=False
for i in range(offset, len(genome), 3):
if i + 2 >reallength-1:
over=True
codon = genome[i:i + 3]
if codon == "ATG" and hasStarted == False and hasStopped == True:
hasStarted = True
hasStopped = False
orf = []
startindex = i % reallength
if compReverse:
startindex=reallength-1-startindex
if hasStarted == True and hasStopped == False and codon not in ["TAA", "TAG", "TGA"]:
orf.append(codon)
if codon in ["TAA", "TAG", "TGA"] and hasStarted == True and hasStopped == False:
hasStarted = False
hasStopped = True
stopindex = i % reallength
if compReverse:
stopindex=reallength-1-stopindex-2
else:
stopindex = stopindex+2
orf = (startindex, stopindex, aminoacidsequencefromorf(orf), compReverse)
if len(orf[2]) >= 34:
orfes.append(orf)
if over:
return orfes
return orfes
def get_orfs(genome):#len 2726, last index 2725
if not isGenomeDna(genome):
raise TypeError()
orfes=[]
for cr in (False, True):
for i in range(0, 3):
orflist = getOrfessingleframedoublestringfull(i, genome,cr)
for orf in orflist:
orfes.append(orf)
orfes=removesameorfes(orfes)
return orfes
def isGenomeDna(genome):
for nucleotide in genome:
if nucleotide not in ('T','C','A','G'):
return False
return True
##COPIED##
# FUNCTION START
def orfFINDER(dna, offset):
orfes=[]
stop_codons = ['tga', 'tag', 'taa']
start_codon = ['atg']
start_positions = []
stop_positions = []
num_starts = 0
num_stops = 0
for i in range(offset, len(dna), 3):
codon = dna[i:i + 3].lower()
if codon in start_codon:
start_positions += str(i + 1).splitlines()
if codon in stop_codons:
stop_positions += str(i + 1).splitlines()
for line in stop_positions:
num_stops += 1
for line in start_positions:
num_starts += 1
orffound = {}
    if num_stops >= 1 and num_starts >= 1:  # first statement: the numbers of stop codons and start codons are both at least 1
orfs = True
stop_before = 0
start_before = 0
if num_starts > num_stops:
num_runs = num_starts
if num_stops > num_starts:
num_runs = num_stops
if num_starts == num_stops:
num_runs = num_starts
position_stop_previous = 0
position_start_previous = 0
counter = 0
for position_stop in stop_positions:
position_stop = int(position_stop.rstrip()) + 2
for position_start in start_positions:
position_start = position_start.rstrip()
if int(position_start) < int(position_stop) and int(position_stop) > int(
position_stop_previous) and int(position_start) > int(position_stop_previous):
counter += 1
nameorf = "orf" + str(counter)
position_stop_previous += int(position_stop) - int(position_stop_previous)
position_start_previous += int(position_start) - int(position_start_previous)
sizeorf = int(position_stop) - int(position_start) + 1
orffound[nameorf] = position_start, position_stop, sizeorf, offset
seq=dna[int(position_start):int(position_stop) + 1]
orr=[]
for i in range(0,len(seq),3):
orr.append(seq[i:i+3])
orf=aminoacidsequencefromorf(orr)
orfes.append((position_start, position_stop, orf,False))
else:
pass
else:
orfs = False
return orfes
# FUNCTION END
# EXECUTE THE ORFFINDER FUNCTION
##COPIED##<file_sep>import numpy as np
from math import log
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences = None
if len(sequences) == 0:
raise TypeError('MSA must contain at least one sequence.')
        num_of_columns = len(sequences[0])
        for i in range(1, len(sequences)):
            if len(sequences[i]) != num_of_columns:
                raise TypeError('MSA rows must all have the same number of columns.')
for i in range(len(sequences)):
for j in range(len(sequences[i])):
index = -1
try:
index = AA_TO_INT[sequences[i][j]]
except KeyError:
raise TypeError('Invalid amino acid character: {0}'.format(sequences[i][j]))
if index < 0:
raise TypeError('Invalid amino acid character: {0}'.format(sequences[i][j]))
self.sequences = sequences
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
sequence_size = self.get_size()
if sequence_size[0] < 0:
raise TypeError('MSA must contain at least one sequence.')
if sequence_size[1] == 0:
raise TypeError('MSA must contain sequences with a length greater than 0.')
        if bg_matrix is None:
bg_matrix = np.zeros((20, 20))
for i in range(20):
for j in range(20):
bg_matrix[i][j] = 1.0 / 400.0
background_frequency = 0.05
weights = self.get_sequence_weights()
num_of_observations = self.get_number_of_observations()
pssm = np.zeros((sequence_size[1], 21))
weighted_counts = np.zeros((sequence_size[1], 21))
pseudo_counts = np.zeros((sequence_size[1], 20))
background_frequencies = np.zeros(20)
for i in range(0, 20):
value = 0.0
for j in range(0, 20):
value += bg_matrix[i][j]
background_frequencies[i] = value
for j in range(sequence_size[1]):
for k in range(0, 21):
value = 0.0
residue = '-'
if k < 20:
residue = INT_TO_AA[k]
for i in range(sequence_size[0]):
if self.sequences[i][j] == residue:
if use_sequence_weights:
value += weights[i]
else:
value += 1.0
weighted_counts[j][k] = value
pssm = weighted_counts
        if redistribute_gaps:
            for i in range(0, 20):
                for j in range(sequence_size[1]):
                    # column j's gap mass (pssm[j][20]) is redistributed
                    # according to the background frequency of amino acid i
                    pssm[j][i] += pssm[j][20] * background_frequencies[i]
for k in range(0, 20):
for j in range(sequence_size[1]):
pseudo_value = 0.0
for l in range(0, 20):
pseudo_value += pssm[j][l] * bg_matrix[l][k] / background_frequencies[l]
pseudo_counts[j][k] = pseudo_value
if add_pseudocounts:
for i in range(0, 20):
for j in range(sequence_size[1]):
pssm[j][i] = (pssm[j][i] * (num_of_observations-1) + pseudo_counts[j][i] * beta) / (num_of_observations-1 + beta)
for j in range(sequence_size[1]):
value = 0.0
for i in range(0, 20):
value += pssm[j][i]
if value > 0.0:
for i in range(0, 20):
pssm[j][i] = pssm[j][i] / value
for j in range(0, 20):
for i in range(sequence_size[1]):
if pssm[i][j] > 0.0:
pssm[i][j] = 2.0 * log((pssm[i][j] / background_frequencies[j]), 2)
else:
pssm[i][j] = -20
if pssm[i][j] == float('-inf'):
pssm[i][j] = -20
result_pssm = []
index = 0
for i in range(sequence_size[1]):
if self.sequences[0][i] != '-':
result_pssm.append([])
for j in range(0, 20):
result_pssm[index].append(round(pssm[i][j]))
index += 1
return np.rint(np.array(result_pssm)).astype(np.int64)
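    # Score sketch for the PSSM computed above: for each non-gap position i of
    # the primary sequence and each amino acid a,
    #     pssm[i][a] = round(2 * log2(f_ia / b_a)),
    # where f_ia is the (optionally weighted, gap-redistributed, pseudocounted)
    # relative frequency and b_a the background frequency; zero frequencies
    # map to -20.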
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
        if self.sequences is None:
return (-1, -1)
num_of_rows = len(self.sequences)
num_of_colums = len(self.sequences[0])
return (num_of_rows, num_of_colums)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
        if self.sequences is None:
return ''
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
sequence_size = self.get_size()
if sequence_size[0] < 0:
raise TypeError('MSA must contain at least one sequence.')
weights = np.zeros(sequence_size[0])
for i in range(sequence_size[0]):
for j in range(sequence_size[1]):
a = self.sequences[i][j]
occurrences = 0
value_set = set([])
for k in range(sequence_size[0]):
b = self.sequences[k][j]
value_set.add(b)
if b == a:
occurrences += 1
if len(value_set) != 1 and occurrences * len(value_set) != 0:
weight = 1 / (occurrences * len(value_set))
weights[i] += weight
return weights.astype(np.float64)
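    # Weighting sketch (position-based weights in the style of Henikoff &
    # Henikoff): in a column with r distinct characters, a sequence whose
    # character occurs s times in that column contributes 1/(r*s) to its
    # weight; columns with only a single distinct character are skipped.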
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
sequence_size = self.get_size()
if sequence_size[0] < 0:
raise TypeError('MSA must contain at least one sequence.')
if sequence_size[1] == 0:
return 0.0
value = 0.0
for j in range(sequence_size[1]):
value_set = set([])
for i in range(sequence_size[0]):
value_set.add(self.sequences[i][j])
value += len(value_set)
num_obs = value / sequence_size[1]
return num_obs<file_sep>import numpy as np
blosum = {
'A': {'A': 4, 'C': 0, 'B': -2, 'E': -1, 'D': -2, 'G': 0, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 0, 'W': -3, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'C': {'A': 0, 'C': 9, 'B': -3, 'E': -4, 'D': -3, 'G': -3, 'F': -2, 'I': -1, 'H': -3, 'K': -3, 'M': -1, 'L': -1, 'N': -3, 'Q': -3, 'P': -3, 'S': -1, 'R': -3, 'T': -1, 'W': -2, 'V': -1, 'Y': -2, 'X': -2, 'Z': -3},
'B': {'A': -2, 'C': -3, 'B': 4, 'E': 1, 'D': 4, 'G': -1, 'F': -3, 'I': -3, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 3, 'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'E': {'A': -1, 'C': -4, 'B': 1, 'E': 5, 'D': 2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0, 'Q': 2, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4},
'D': {'A': -2, 'C': -3, 'B': 4, 'E': 2, 'D': 6, 'G': -1, 'F': -3, 'I': -3, 'H': -1, 'K': -1, 'M': -3, 'L': -4, 'N': 1, 'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': 1},
'G': {'A': 0, 'C': -3, 'B': -1, 'E': -2, 'D': -1, 'G': 6, 'F': -3, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0, 'Q': -2, 'P': -2, 'S': 0, 'R': -2, 'T': -2, 'W': -2, 'V': -3, 'Y': -3, 'X': -1, 'Z': -2},
'F': {'A': -2, 'C': -2, 'B': -3, 'E': -3, 'D': -3, 'G': -3, 'F': 6, 'I': 0, 'H': -1, 'K': -3, 'M': 0, 'L': 0, 'N': -3, 'Q': -3, 'P': -4, 'S': -2, 'R': -3, 'T': -2, 'W': 1, 'V': -1, 'Y': 3, 'X': -1, 'Z': -3},
'I': {'A': -1, 'C': -1, 'B': -3, 'E': -3, 'D': -3, 'G': -4, 'F': 0, 'I': 4, 'H': -3, 'K': -3, 'M': 1, 'L': 2, 'N': -3, 'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': -1, 'W': -3, 'V': 3, 'Y': -1, 'X': -1, 'Z': -3},
'H': {'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2, 'F': -1, 'I': -3, 'H': 8, 'K': -1, 'M': -2, 'L': -3, 'N': 1, 'Q': 0, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -2, 'V': -3, 'Y': 2, 'X': -1, 'Z': 0},
'K': {'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2, 'F': -3, 'I': -3, 'H': -1, 'K': 5, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -1, 'S': 0, 'R': 2, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 1},
'M': {'A': -1, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 0, 'I': 1, 'H': -2, 'K': -1, 'M': 5, 'L': 2, 'N': -2, 'Q': 0, 'P': -2, 'S': -1, 'R': -1, 'T': -1, 'W': -1, 'V': 1, 'Y': -1, 'X': -1, 'Z': -1},
'L': {'A': -1, 'C': -1, 'B': -4, 'E': -3, 'D': -4, 'G': -4, 'F': 0, 'I': 2, 'H': -3, 'K': -2, 'M': 2, 'L': 4, 'N': -3, 'Q': -2, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
'N': {'A': -2, 'C': -3, 'B': 3, 'E': 0, 'D': 1, 'G': 0, 'F': -3, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -3, 'N': 6, 'Q': 0, 'P': -2, 'S': 1, 'R': 0, 'T': 0, 'W': -4, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'Q': {'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': 0, 'L': -2, 'N': 0, 'Q': 5, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -2, 'V': -2, 'Y': -1, 'X': -1, 'Z': 3},
'P': {'A': -1, 'C': -3, 'B': -2, 'E': -1, 'D': -1, 'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -2, 'L': -3, 'N': -2, 'Q': -1, 'P': 7, 'S': -1, 'R': -2, 'T': -1, 'W': -4, 'V': -2, 'Y': -3, 'X': -2, 'Z': -1},
'S': {'A': 1, 'C': -1, 'B': 0, 'E': 0, 'D': 0, 'G': 0, 'F': -2, 'I': -2, 'H': -1, 'K': 0, 'M': -1, 'L': -2, 'N': 1, 'Q': 0, 'P': -1, 'S': 4, 'R': -1, 'T': 1, 'W': -3, 'V': -2, 'Y': -2, 'X': 0, 'Z': 0},
'R': {'A': -1, 'C': -3, 'B': -1, 'E': 0, 'D': -2, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 2, 'M': -1, 'L': -2, 'N': 0, 'Q': 1, 'P': -2, 'S': -1, 'R': 5, 'T': -1, 'W': -3, 'V': -3, 'Y': -2, 'X': -1, 'Z': 0},
'T': {'A': 0, 'C': -1, 'B': -1, 'E': -1, 'D': -1, 'G': -2, 'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0, 'Q': -1, 'P': -1, 'S': 1, 'R': -1, 'T': 5, 'W': -2, 'V': 0, 'Y': -2, 'X': 0, 'Z': -1},
'W': {'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -4, 'G': -2, 'F': 1, 'I': -3, 'H': -2, 'K': -3, 'M': -1, 'L': -2, 'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 11, 'V': -3, 'Y': 2, 'X': -2, 'Z': -3},
'V': {'A': 0, 'C': -1, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': -1, 'I': 3, 'H': -3, 'K': -2, 'M': 1, 'L': 1, 'N': -3, 'Q': -2, 'P': -2, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -2},
'Y': {'A': -2, 'C': -2, 'B': -3, 'E': -2, 'D': -3, 'G': -3, 'F': 3, 'I': -1, 'H': 2, 'K': -2, 'M': -1, 'L': -1, 'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -2, 'T': -2, 'W': 2, 'V': -1, 'Y': 7, 'X': -1, 'Z': -2},
'X': {'A': 0, 'C': -2, 'B': -1, 'E': -1, 'D': -1, 'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1, 'L': -1, 'N': -1, 'Q': -1, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -2, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
'Z': {'A': -1, 'C': -3, 'B': 1, 'E': 4, 'D': 1, 'G': -2, 'F': -3, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0, 'Q': 3, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -3, 'V': -2, 'Y': -2, 'X': -1, 'Z': 4}
}
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substituion_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.alignments = self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(len(self.string2)+1):
for j in range(len(self.string1)+1):
if i == 0 and j == 0:
self.score_matrix[i, j] = 0
else:
self.score_matrix[i, j] = self.calc_max(i, j)
alignments = self.calc_alignments(len(self.string2), len(self.string1), [('', '')])
return alignments
def get_prevs(self, i, j):
prev = []
if i > 0 and j > 0:
if self.score_matrix[i-1, j-1] + self.substituion_matrix[self.string2[i-1]][self.string1[j-1]] == self.score_matrix[i,j]:
prev.append((-1, -1))
if j > 0 and self.score_matrix[i, j - 1] + self.gap_penalty == self.score_matrix[i,j]:
prev.append((0, -1))
if i > 0 and self.score_matrix[i-1, j] + self.gap_penalty == self.score_matrix[i,j]:
prev.append((-1, 0))
return prev
def calc_alignments(self, i, j, tpls):
result = []
if i == 0 and j == 0:
res = []
for tpl in tpls:
a = list(tpl[0])
a.reverse()
a = ''.join(a)
b = list(tpl[1])
b.reverse()
b = ''.join(b)
res.append((a, b))
result.extend(res)
else:
for num, tpl in enumerate(tpls):
prevs = self.get_prevs(i, j)
for prev in prevs:
if prev == (-1, -1):
result.extend(self.calc_alignments(i-1, j-1, [(tpl[0]+ self.string1[j-1], tpl[1]+self.string2[i-1])]))
if prev == (-1, 0):
result.extend(self.calc_alignments(i-1, j, [(tpl[0]+'-', tpl[1]+self.string2[i-1])]))
if prev == (0, -1):
result.extend(self.calc_alignments(i, j-1, [(tpl[0]+self.string1[j-1], tpl[1]+'-')]))
return result
def calc_max(self, i , j):
results = []
if i > 0 and j > 0:
res = self.score_matrix[i-1, j-1] + self.substituion_matrix[self.string2[i-1]][self.string1[j-1]]
results.append(res)
else:
results.append(-np.inf)
if j > 0:
res = self.score_matrix[i, j-1] + self.gap_penalty
results.append(res)
else:
results.append(-np.inf)
if i > 0:
res = self.score_matrix[i-1, j] + self.gap_penalty
results.append(res)
else:
results.append(-np.inf)
return max(results)
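    # Recurrence evaluated above (Needleman-Wunsch):
    #     S[i][j] = max(S[i-1][j-1] + sub(string2[i-1], string1[j-1]),
    #                   S[i][j-1] + gap_penalty,
    #                   S[i-1][j] + gap_penalty)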
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1,-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return [list(i) for i in self.score_matrix]
if __name__ == '__main__':
test = GlobalAlignment("SCYTHE", "SCTHE", -6, blosum)
<file_sep>##############
# Exercise 2.5
##############
import aa_codon
# You can use the supplied test cases for your own testing. Good luck!
def get_orfs(genome):
dna_pattern = 'ACGT'
is_dna = all(i in dna_pattern for i in genome)
if not is_dna:
raise TypeError('The given sequence is not a valid DNA sequence')
else:
genome = genome.upper()
result = list()
for circ in range(2):
for compRev in range(2):
for offset in range(3):
starts = list()
stops = list()
for current in range(offset, len(genome), 3):
codon = genome[current:current + 3]
if codon == 'ATG':
starts.append(current)
if codon == 'TAA' or codon == 'TGA' or codon == 'TAG':
stops.append(current)
dictionary = {}
previous_stop = 0
previous_start = 0
for stop_index, stop_value in enumerate(stops):
for start_index, start_value in enumerate(starts):
if start_value < stop_value:
if start_index >= previous_stop:
if stop_value in dictionary:
dictionary[stop_value].append(start_value)
else:
dictionary[stop_value] = list()
dictionary[stop_value].append(start_value)
else:
previous_stop = start_index
break
if start_index == len(starts) - 1:
previous_stop = len(starts)
for key, value in dictionary.items():
dictionary[key] = min(value)
aa_seq = ''
if dictionary[key] < key + 3:
aa_seq = aa_codon.codons_to_aa(genome[dictionary[key]:key + 3])
else:
orf = genome[dictionary[key]:len(genome)] + (genome[0:(key + 4)])
aa_seq = aa_codon.codons_to_aa(orf)
if aa_seq == None:
aa_seq = ''
if len(aa_seq) >= 34:
if compRev == 0 and circ == 0:
result.append((dictionary[key], (key + 2), aa_seq, False))
elif compRev == 1 and circ == 0:
result.append((len(genome) - dictionary[key] - 1, len(genome) - key - 3, aa_seq, True))
elif compRev == 0 and circ == 1:
if (key + 2) - int(len(genome) / 2)-1 > 0:
result.append(((dictionary[key] + int(len(genome) / 2)) % len(genome) - 1, (key + 2) - int(len(genome) / 2)-1, aa_seq, False))
#print(dictionary)
genome = complementary(genome)
genome = reverse(genome)
genome = genome[int(len(genome) / 2) - 1: len(genome)] + genome[0:int(len(genome) / 2) - 1]
result = list(dict.fromkeys(result))
for r in result:
print(r)
return result
def get_orfs1(genome):
dna_pattern = 'ACGT'
is_dna = all(i in dna_pattern for i in genome)
if not is_dna:
raise TypeError('The given sequence is not a valid DNA sequence')
else:
genome = genome.upper()
indexes = list()
for k in range(2):
for i in range(3):
start = -1
start_index = -1
min_start = 99999999
stop_index = -1
for h in range(i, len(genome), 3):
codon = genome[h:h + 3]
if codon == 'ATG':
start = h
break
for t in range(start, len(genome), 3):
j = t % len(genome)
codon = genome[j:j + 3]
if codon == 'ATG':
start_index = j
if start_index < min_start:
min_start = start_index
if codon == 'TAA' or codon == 'TGA' or codon == 'TAG':
stop_index = j + 3
if min_start < 99999999 and stop_index > 0 and min_start < stop_index:
aa_seq = aa_codon.codons_to_aa(genome[min_start:stop_index])
if len(aa_seq) >= 34:
if k == 0:
indexes.append((min_start, stop_index - 1, aa_seq, False))
else:
indexes.append((len(genome) - min_start - 1, len(genome) - stop_index, aa_seq, True))
start_index = -1
stop_index = -1
min_start = 99999999
genome = complementary(genome)
genome = reverse(genome)
print(indexes)
return indexes
def complementary(input):
res = ''
for i in input:
if i.upper() == 'A':
res += 'T'
if i.upper() == 'T':
res += 'A'
if i.upper() == 'C':
res += 'G'
if i.upper() == 'G':
res += 'C'
return res
def reverse(input):
return input[::-1]
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
#genome = read_genome('tests/genome.txt')
#get_orfs(genome)
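# Small hedged self-check of the strand helpers defined above; illustrative
# only, not part of the exercise tests.
if __name__ == '__main__':
    assert complementary('ACGT') == 'TGCA'
    assert reverse('ACGT') == 'TGCA'
    print(reverse(complementary('GCTATGAGG')))  # reverse complement: 'CCTCATAGC'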
<file_sep>##############
# Exercise 2.7
##############
import aa_props_dict
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in aa_props_dict.positive_dict
def isNegativelyCharged(aa):
return aa in aa_props_dict.negative_dict
def isHydrophobic(aa):
return aa in aa_props_dict.hydrophobic_dict
def isAromatic(aa):
return aa in aa_props_dict.aromatic_dict
def isPolar(aa):
return aa in aa_props_dict.polar_dict
def isProline(aa):
return aa == 'P'
def containsSulfur(aa):
return aa in aa_props_dict.sulfur_dict
def isAcid(aa):
return aa in aa_props_dict.acid_dict
def isBasic(aa):
return aa in aa_props_dict.basic_dict
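# Hedged sanity checks for the predicates above; they assume the usual
# biochemical classification in the course-provided aa_props_dict module.
if __name__ == '__main__':
    assert isCharged('K') and isPositivelyCharged('K')
    assert isNegativelyCharged('D') and isAcid('D')
    assert isAromatic('W') and isHydrophobic('W')
    assert isProline('P') and not isProline('G')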
<file_sep>import numpy as np
from collections import Counter
import re
import json
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
pattern = re.compile(ALPHABET)
#config = json.loads(open('./pssm_test.json').read())
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
sequences for correctness. Pre-calculate any statistics you seem fit.
:param sequences: List containing the MSA sequences.
"""
sequences_ok = sequences is not None and len(sequences) > 0
list_does_contain = sequences_ok and all(bool(re.match('^[ACDEFGHIKLMNPQRSTVWY-]+$', item)) for item in sequences)
all_same_length = sequences_ok and all(len(l) == len(sequences[0]) for l in sequences)
if not sequences_ok or not all_same_length or not list_does_contain:
raise TypeError('Invalid MSA')
else:
self.sequences = sequences
self.num_seqs, self.msa_length = self.get_size()
self.frequencies = self.freq_count()
self.ungapped_seq_length = len(self.get_primary_sequence())
self.ungapped_pri_seq_positions= list(i for i,x in enumerate(self.sequences[0]) if x != '-')
self.weighted_freq = self.get_weighted_freq()
self.p = 0.05
self.pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)
self.freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
self.gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
self.alpha = self.get_number_of_observations()-1
def freq_count(self):
frequencies = np.zeros((self.msa_length, len(ALPHABET)), dtype=np.float64)
for s in self.sequences:
for i,j in enumerate(s):
frequencies[i][AA_TO_INT[j]] += 1
return frequencies
def get_weighted_freq(self):
weighted_freq = np.zeros((self.msa_length, 21), dtype=np.float64)
curr_seq = 0
weights = self.get_sequence_weights()
for s in self.sequences:
for i,j in enumerate(s):
weighted_freq[i][AA_TO_INT[j]] += weights[curr_seq]
if i+1 == self.msa_length:
curr_seq += 1
return weighted_freq
# def get_pseudo_freq(self, bg_matrix):
# pseudo_freq = np.zeros((self.msa_length, 21), dtype=np.float64)
# curr_seq = 0
# pseudo_counts = (self.freq/self.p).dot(bg_matrix)
# for s in self.sequences:
# for i,j in enumerate(s):
# pseudo_freq[i][AA_TO_INT[j]] += pseudo_counts[curr_seq]
# if i+1 == self.msa_length:
# curr_seq += 1
# return weighted_freq
def calc_pssm(self, p, freq):
pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)
normalized_f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(normalized_f/p)
pssm_matrix[np.where(normalized_f == 0.0)] = -20  # clamp -inf from log2(0), consistent with the other variants
return np.rint(pssm_matrix).astype(np.int64)
def get_weighted_pssm(self):
p = 0.05
freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
normalized_f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(normalized_f/p)
pssm_matrix[np.where(normalized_f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_pssm_with_background(self, bg_matrix):
pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
aligned_freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
normalized_f = aligned_freq/np.sum(aligned_freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(normalized_f/back_freq)
pssm_matrix[np.where(normalized_f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_basic_pssm(self):
pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)
p = 0.05
aligned_freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
normalized_f = aligned_freq/np.sum(aligned_freq, axis=1, keepdims=True)
normalized_f[np.where(normalized_f == 0.0) ] = (2**-10)*p
pssm_matrix = 2*np.log2(normalized_f/p)
return np.rint(pssm_matrix).astype(np.int64)
# pssm_matrix looks identical to the one in the tests, but the test still fails
# with "Initialization failed"; perhaps the bug is in how it is called from get_pssm.
def get_pssm_with_distr_gap(self):
pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)
freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
p = 0.05
freq += gaps.dot(p)
normalized_f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(normalized_f/p)
pssm_matrix[np.where(normalized_f == 0.0)] = -20  # clamp -inf from log2(0), consistent with the other variants
pssm_matrix = np.rint(pssm_matrix).astype(np.int64)
return pssm_matrix
def get_pssm_with_background_w_gaps(self, bg_matrix):
pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)
freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
freq += gaps.dot(back_freq)
normalized_f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(normalized_f/back_freq)
pssm_matrix[np.where(normalized_f == 0.0) ] = -20
pssm_matrix = np.rint(pssm_matrix).astype(np.int64)
return pssm_matrix
def get_weighted_pssm_with_background(self, bg_matrix):
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(f/back_freq)
pssm_matrix[np.where(f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_weighted_pssm_with_background_distr(self, bg_matrix):
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
freq += gaps.dot(back_freq)
f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(f/back_freq)
pssm_matrix[np.where(f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_pssm_with_pseudocounts(self, bg_matrix, beta):
p= 0.05
freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
pseudo_counts = (freq/p).dot(bg_matrix)
alpha = self.get_number_of_observations()-1
freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)
f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(f/p)
pssm_matrix[np.where(f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_pssm_with_pseudocounts_with_gap_bg(self, bg_matrix, beta):
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
freq += gaps.dot(back_freq)
pseudo_counts = (freq/back_freq).dot(bg_matrix)
alpha = self.get_number_of_observations()-1
freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)
f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(f/back_freq)
pssm_matrix[np.where(f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)  # was returning pseudo_counts by mistake
def get_pssm_with_weighted_distr_bg_pseudocounts(self, bg_matrix, beta):
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
freq += gaps.dot(back_freq)
pseudo_counts = (freq/back_freq).dot(bg_matrix)
alpha = self.get_number_of_observations()-1
freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)
f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(f/back_freq)
pssm_matrix[np.where(f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_pssm_with_weighted_bg_pseudocounts(self, bg_matrix, beta):
back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)
freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
pseudo_counts = (freq/back_freq).dot(bg_matrix)
alpha = self.get_number_of_observations()-1
freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)
f = freq/np.sum(freq, axis=1, keepdims=True)
pssm_matrix = 2*np.log2(f/back_freq)
pssm_matrix[np.where(f == 0.0) ] = -20
return np.rint(pssm_matrix).astype(np.int64)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
if self.sequences is not None:
pssm = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), np.int64)
# Use explicit None checks: truth-testing a (20, 20) NumPy array raises ValueError.
if bg_matrix is not None and use_sequence_weights and redistribute_gaps and add_pseudocounts:
pssm = self.get_pssm_with_weighted_distr_bg_pseudocounts(bg_matrix, beta)
elif bg_matrix is not None and redistribute_gaps and not use_sequence_weights and not add_pseudocounts:
pssm = self.get_pssm_with_background_w_gaps(bg_matrix)
elif bg_matrix is not None and not redistribute_gaps and not use_sequence_weights and not add_pseudocounts:
pssm = self.get_pssm_with_background(bg_matrix)
elif bg_matrix is not None and use_sequence_weights and not add_pseudocounts and not redistribute_gaps:
pssm = self.get_weighted_pssm_with_background(bg_matrix)
elif bg_matrix is not None and use_sequence_weights and not add_pseudocounts and redistribute_gaps:
pssm = self.get_weighted_pssm_with_background_distr(bg_matrix)
elif bg_matrix is None and add_pseudocounts and not use_sequence_weights and not redistribute_gaps:
pssm = self.get_pssm_with_pseudocounts(bg_matrix, beta)
elif bg_matrix is not None and add_pseudocounts and use_sequence_weights and not redistribute_gaps:
pssm = self.get_pssm_with_weighted_bg_pseudocounts(bg_matrix, beta)
elif bg_matrix is not None and add_pseudocounts and not use_sequence_weights and redistribute_gaps:
pssm = self.get_pssm_with_pseudocounts_with_gap_bg(bg_matrix, beta)
elif bg_matrix is None and redistribute_gaps and not add_pseudocounts and not use_sequence_weights:
pssm = self.get_pssm_with_distr_gap()
elif bg_matrix is None and not redistribute_gaps and not add_pseudocounts and use_sequence_weights:
pssm = self.get_weighted_pssm()
else:
pssm = self.get_basic_pssm()
return pssm
# if bg_matrix:
# back_freq = np.sum(bg_matrix, axis=0).reshape(1,20),
# self.p = back_freq
# if redistribute_gaps:
# self.freq += self.gaps.dot(self.p)
# if add_pseudocounts:
# pseudo_counts = (self.freq/self.p).dot(bg_matrix)
# self.freq = (self.alpha * self.freq + beta * pseudo_counts)/(self.alpha+beta)
# if use_sequence_weights:
# self.freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
# self.gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
# f = self.freq/np.sum(self.freq, axis=1, keepdims=True)
# self.pssm_matrix = 2*np.log2(f/self.p)
# self.pssm_matrix[np.where(f == 0.0) ] = -20
# return np.rint(self.pssm_matrix).astype(np.int64)
# if bg_matrix:
# back_freq = np.sum(bg_matrix, axis=0).reshape(1,20),
# self.p = back_freq
# if redistribute_gaps:
# self.freq += self.gaps.dot(self.p)
# if add_pseudocounts:
# pseudo_counts = (self.freq/self.p).dot(bg_matrix)
# self.freq = (self.alpha * self.freq + beta * pseudo_counts)/(self.alpha+beta)
# if use_sequence_weights:
# self.freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]
# self.gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)
# self.freq += self.gaps.dot(self.p)
# f = self.freq/np.sum(self.freq, axis=1, keepdims=True)
# self.pssm_matrix = 2*np.log2(f/self.p)
# self.pssm_matrix[np.where(f == 0.0) ] = -20
# print(self.pssm_matrix)
# return np.rint(self.pssm_matrix).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
num_seqs = len(self.sequences)
msa_length = len(self.sequences[0])
return (num_seqs, msa_length)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
return self.sequences[0].replace('-', '')
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
weights = np.zeros(self.num_seqs)
curr_seq = 0
r = np.count_nonzero(self.frequencies, axis = 1)
W = np.zeros((self.msa_length, self.num_seqs), dtype=np.float64)
weights = np.zeros(self.num_seqs)
for s in self.sequences:
for i,j in enumerate(s):
W[i][curr_seq] = 1.0/(self.frequencies[i][AA_TO_INT[j]]*r[i])
if i+1 == self.msa_length:
curr_seq += 1
weights = np.sum(W[r > 1], axis = 0)
return weights.astype(np.float64)
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
r = np.count_nonzero(self.frequencies, axis = 1)
num_obs = sum(r)/self.msa_length
return num_obs.astype(np.float64)
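# Illustrative toy example (hedged; kept commented out like the test code
# below). Each PSSM entry is 2*log2(f/p) rounded, with -inf clamped to -20.
# msa = MSA(["SE-AN", "SE-ES", "SEVEN", "SE-AS"])
# print(msa.get_size())             # (4, 5)
# print(msa.get_primary_sequence()) # 'SEAN'
# print(msa.get_pssm().shape)       # (4, 20)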
# pssm = MSA(config["msa_sequences"]).get_pssm_with_pseudocounts_with_gap(config["bg_matrix"],beta=10)
# print(pssm)
# # print(len(config["pssm_07"]))
# print(np.array_equal(pssm, config["pssm_08"]))
# pssm = MSA(config["msa_sequences"]).get_pssm_with_pseudocounts(config["bg_matrix"],beta=10)
# print(pssm)
# # print(len(config["pssm_07"]))
# print(np.array_equal(pssm, config["pssm_07"]))<file_sep>##############
# Exercise 2.7
##############
posCharged = ['H','R','K']
negCharged = ['D','E']
hydrophobic = ['A','V','I','L','M','F','Y','W']
aro = ['F','W','Y','H']
polar = ['R','N','D','Q','E','H','K','S','T','Y']
sulfur = ['C','U','M']
acid = ['D','E']
basic = ['H','K','R']
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in posCharged
def isNegativelyCharged(aa):
return aa in negCharged
def isHydrophobic(aa):
return aa in hydrophobic
def isAromatic(aa):
return aa in aro
def isPolar(aa):
return aa in polar
def isProline(aa):
return aa == 'P'
def containsSulfur(aa):
return aa in sulfur
def isAcid(aa):
return aa in acid
def isBasic(aa):
return aa in basic
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
from codon import *
def helper_orfs(genome_str, is_reverse, start_index, genome_length, remainder):
results = []
strings = []
startIndices = []
stopIndices = []
counter = -1
hasStarted = False
for j in range(len(genome_str)):
if not hasStarted and (j*3) >= genome_length:
break
i = j % len(genome_str)
x = genome_str[i]
if x == 'M':
if not hasStarted and (j*3) < genome_length:
strings.append('M')
startIndices.append(j)
hasStarted = True
counter = counter + 1
else:
strings[counter] += 'M'
elif x == '$':
if hasStarted:
stopIndices.append(j)
hasStarted = False
else:
if hasStarted:
strings[counter] += x
#print("strings: {0}".format(strings))
for i in range(len(strings)):
if len(startIndices) == 0 or len(stopIndices) == 0 or i >= len(startIndices) or i >= len(stopIndices):
continue
#print("string: {0}, , startIndex: {1}, stopindex: {2}".format(strings[i], startIndices[i], stopIndices[i]))
if len(strings[i]) > 33:
if not is_reverse:
start = startIndices[i] * 3
stop = stopIndices[i] * 3 + 2
if (stop >= genome_length):
start -= start_index
stop = stop % genome_length - start_index
removes = []
for k in range(len(results)):
if results[k][0] < stop and results[k][1] < start and len(results[k][3]) < len(strings[i]):
removes.append(k)
if len(removes) > 0:
newResults = []
for k in range(len(results)):
if k not in removes:
newResults.append(results[k])
results = newResults
else:
start = startIndices[i] * 3
stop = stopIndices[i] * 3
if (start >= genome_length):
start = start % genome_length
if (stop >= genome_length):
stop = stop % genome_length
removes = []
for k in range(len(results)):
if results[k][0] > stop and results[k][1] > start and len(results[k][3]) < len(strings[i]):
removes.append(k)
if len(removes) > 0:
newResults = []
for k in range(len(results)):
if k not in removes:
newResults.append(results[k])
results = newResults
if not is_reverse:
start += start_index
stop += start_index
else:
pass
start -= 2 - start_index
stop -= 2 - start_index
if is_reverse:
start = genome_length - start
stop = genome_length - stop - 2
result = (start, stop, strings[i], is_reverse)
results.append(result)
return results
def get_orfs(genome):
genome_length = len(genome)
if genome_length < 3:
return None
try:
text = genome + genome
rem = len(text) % 3
text = text[:len(text)-rem]
f1 = codons_to_aa(text)
f1_comp = codons_to_aa(complementary(text)[::-1])
text = genome[1:] + genome
rem = len(text) % 3
text = text[:len(text)-rem]
f2 = codons_to_aa(text)
f2_comp = codons_to_aa(complementary(text)[::-1])
text = genome[2:] + genome
rem = len(text) % 3
text = text[:len(text)-rem]
f3 = codons_to_aa(text)
f3_comp = codons_to_aa(complementary(text)[::-1])
except KeyError as e:
raise TypeError("Key error: not a valid genome.")
#if f1 is None and f2 is None and f3 is None and f1_comp is None and f2_comp is None and f3_comp is None:
# raise TypeError("Type error: not a valid genome.")
#print("f1: {0}".format(f1))
#print("f2: {0}".format(f2))
#print("f3: {0}".format(f3))
#print("f1_comp: {0}".format(f1_comp))
#print("f2_comp: {0}".format(f2_comp))
#print("f3_comp: {0}".format(f3_comp))
results = []
if f1 is not None:
results += helper_orfs(f1, False, 0, genome_length, genome_length % 3)
if f2 is not None:
results += helper_orfs(f2, False, 1, genome_length - 2, (genome_length - 1) % 3)
if f3 is not None:
results += helper_orfs(f3, False, 2, genome_length - 3, (genome_length - 2) % 3)
if f1_comp is not None:
results += helper_orfs(f1_comp, True, 0, genome_length, genome_length % 3)
if f2_comp is not None:
results += helper_orfs(f2_comp, True, 1, genome_length - 2, (genome_length - 1) % 3)
if f3_comp is not None:
results += helper_orfs(f3_comp, True, 2, genome_length - 3, (genome_length - 2) % 3)
return results
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
if __name__ == '__main__':
# Global genome sequence (genome.txt must be in the same directory as orfs_test.py)
genome = read_genome('./genome/genome.txt')
genome = genome.upper()
genome_length = len(genome)
#genome = re.sub("[^ACGT]", "", genome)
orfs = get_orfs(genome)
print("results: {0}".format(orfs))
# GCTATGAGGTCATGGCTTCTGTAGTAACGTGAC<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed in NumPy 1.24
self.previous_node_matrix = {}
self.alignment_paths = []
self.m = self.score_matrix.shape[0]
self.n = self.score_matrix.shape[1]
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(1,self.m,1):
self.score_matrix[i,0] = i*self.gap_penalty
for i in range(1,self.n,1):
self.score_matrix[0,i] = i*self.gap_penalty
for i in range(1,self.m,1):
for j in range(1,self.n,1):
self.score_matrix[i,j] = max(self.score_matrix[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]],
self.score_matrix[i][j-1]+self.gap_penalty,self.score_matrix[i-1][j]+self.gap_penalty)
prev_path_nodes = []
if i>=2 and j>=2 and self.score_matrix[i,j] == self.score_matrix[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
prev_path_nodes.append((i-1,j-1))
if j >=2 and self.score_matrix[i,j] == self.score_matrix[i][j-1]+self.gap_penalty:
prev_path_nodes.append((i,j-1))
if i >= 2 and self.score_matrix[i,j] == self.score_matrix[i-1][j]+self.gap_penalty:
prev_path_nodes.append((i-1,j))
self.previous_node_matrix[(i,j)] = prev_path_nodes
currPath = []
currPath.append((self.m-1,self.n-1))
self.get_alignments_recursion(currPath)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1][-1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignment_paths)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
res_list = []
for optimal_path in self.alignment_paths:
str1 = ""
str2 = ""
str1_idx = 1
str2_idx = 1
for node in reversed(optimal_path):
if node[0] == str2_idx: #no gaps
str2 += str(self.string2[str2_idx-1])
str2_idx += 1
else: #gap
str2 += "-"
if str2[-1] != "-":
str2_idx += 1
if node[1] == str1_idx: #no gaps
str1 += str(self.string1[str1_idx-1])
str1_idx += 1
else: #gap
str1 += "-"
if str1[-1] != "-":
str1_idx += 1
res_list.append((str1,str2))
return res_list
def get_alignments_recursion(self, currPath):
if currPath[-1] == (0,0):
return
if currPath[-1] == (1,1):
self.alignment_paths.append(currPath)
return
else:
curr_i = currPath[-1][0]
curr_j = currPath[-1][1]
for prev_node in self.previous_node_matrix[(curr_i,curr_j)]:
new_currPath = currPath.copy()
new_currPath.append(prev_node)
self.get_alignments_recursion(new_currPath)
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
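if __name__ == '__main__':
    # Hedged usage sketch with an inline identity-style substitution matrix,
    # so the example does not depend on the course matrix files.
    ident = {a: {b: (3 if a == b else -1) for b in 'ACGT'} for a in 'ACGT'}
    ga = GlobalAlignment('ACGT', 'ACT', -2, ident)
    print(ga.get_best_score())  # expected 7 under these toy scores
    print(ga.get_alignments())  # e.g. [('ACGT', 'AC-T')]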
<file_sep>import numpy as np
import logging
from typing import Set, List
from collections import Counter
from itertools import product
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
logging.basicConfig(level=logging.DEBUG)
self.seqs: np.ndarray = np.array([])
self.full_seqs: np.ndarray = np.array(np.array(['M', 'Q'])) # just random initialization
self.buffer: List[str] = []
logging.warning(f'initialized. self.seqs: {self.seqs} - self.buffer: {self.buffer}')
self.all_words = set()
self.words_per_seq = []
self.total_count = Counter()
self.max_one_per_seq_count = Counter()
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
# logging.warning(f'self.seqs before adding: {self.seqs}')
# self.seqs = np.append(self.seqs, sequence)
self.buffer.append(sequence) # buffer them here because np.append might take longer if often called?
seq_list = []
for i in range(len(sequence) - 2):
seq_list.append(sequence[i:i + 3])
seq_count = Counter(seq_list)
self.words_per_seq.append(len(seq_count))
self.total_count.update(seq_count)
uniques = list(seq_count)
self.max_one_per_seq_count.update(Counter(uniques))
self.all_words.update(uniques)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
if self.buffer:
self.buffer_to_seqs()
containing_mask = [word in seq for seq in self.seqs]
containing = self.seqs[containing_mask]
logging.debug(f'containing: {containing}')
logging.warning(f'word: {word} in: {len(containing)} - not in: {self.seqs.size - len(containing)}')
return containing # TODO tolist necessary?
def get_db_stats(self):
if self.buffer:
self.buffer_to_seqs()
self.num_seqs = self.seqs.size
self.num_words = len(self.all_words)
self.avg_words_per_seq = int(round(np.mean(self.words_per_seq)))
self.avg_seqs_per_word = int(round(np.mean(list(self.max_one_per_seq_count.values()))))
return self.num_seqs, self.num_words, self.avg_words_per_seq, self.avg_seqs_per_word
# def get_db_stats2(self):
# """
# Return some database statistics:
# - Number of sequences in database
# - Number of different words in database
# - Average number of words per sequence (rounded to nearest int)
# - Average number of sequences per word (rounded to nearest int)
#
# :return: Tuple with four integer numbers corresponding to the mentioned
# statistics (in order of listing above).
# """
# if self.buffer:
# self.buffer_to_seqs()
# num_seqs = self.seqs.size
# all_words2, words_per_seq2, total_count2, max_one_per_seq_count = self.get_all_words2()
# num_words2 = len(all_words2)
#
# avg_words_per_seq = np.mean(words_per_seq2)
# avg_words_per_seq = int(round(avg_words_per_seq))
# wordface_vals = list(max_one_per_seq_count.values())
# meany_mc_wordface = np.mean(wordface_vals)
# avg_seqs_per_word = int(round(meany_mc_wordface)) # TODO counting two occurences in one seq as two seqs for
# return num_seqs, num_words2, avg_words_per_seq, avg_seqs_per_word
# def get_all_words2(self):
# all_words = set()
# words_per_seq = []
# total_count = Counter()
# max_one_per_seq_count = Counter()
# for seq in self.seqs:
# seq_list = []
# for i in range(len(seq) - 2):
# seq_list.append(seq[i:i+3])
# all_words.update(seq_list)
# seq_count = Counter(seq_list)
# words_in_this_seq = len(seq_count.keys())
# words_per_seq.append(words_in_this_seq)
# total_count.update(seq_count)
# uniques = list(seq_count)
# max_one_per_seq_count.update(Counter(uniques))
# return all_words, words_per_seq, total_count, max_one_per_seq_count
def buffer_to_seqs(self):
logging.warning(f'adding from bufferset with len: {len(self.buffer)} to array with len: {self.seqs.size}')
logging.debug(f'buffer: {self.buffer}')
logging.debug(f'arr b4 buffer: {self.seqs}')
self.seqs = np.append(self.seqs, self.buffer)
logging.debug(f'arr with buffer: {self.seqs}')
self.full_seqs = self.strings_to_rows_for_full_2d(self.seqs)
logging.debug(f'self.full_seqs = {self.full_seqs}')
self.buffer = []
def strings_to_rows_for_full_2d(self, seqs):
strseqs = [list(seq) for seq in seqs]
return np.array(strseqs)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def get_words_seq(self, sequence, T):
res = set()
for i in range(len(sequence) - 2):
word_1 = sequence[i:i + 3]
for word_2 in product(ALPHABET, ALPHABET, ALPHABET):
score = 0
for j in range(3):
index_1 = AA_TO_INT[word_1[j]]
index_2 = AA_TO_INT[word_2[j]]
score += self.substitution_matrix[index_1][index_2]
if score >= T:
res.add(''.join(word_2))
return list(res)
def get_words_pssm(self, pssm, T):
res = set()
for i in range(len(pssm) - 2):
for word_2 in product(ALPHABET, repeat=3):
score = 0
for j in range(3):
index_2 = AA_TO_INT[word_2[j]]
score += pssm[i + j][index_2]
if score >= T:
res.add(''.join(word_2))
return list(res)
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
if sequence is not None: # seq and submatrix
# sequence_arr = np.array(list(sequence))
return self.get_words_seq(sequence, T)
else: # pssm
return self.get_words_pssm(pssm, T)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d  # placeholder HSP until the search is implemented; without this the method returned None
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
d = dict()
d['SEQWENCE'] = [(1, 2, 4, 13)]
return d  # placeholder HSP until the search is implemented; without this the method returned None
def main():
logging.basicConfig(level=logging.DEBUG)
some_seqs = [
"MVQRLTYRRRLSYNTASNKTRLSRTPGNRIVYLYTKKVGKAPKSACGVLPGRLRGVVAVRPKVLMRLSKTKKHVQQGLWWLHVRQVCPDRIKRAFLIEEQKIVVKVLKAQAQSQKAK",
"MSGRLWCKAIFAGYKRGLRNQREHTALLKIEGVYARDETEFYLGKRCAYVYKAKNNTVTPGGKPNKTRVIWGKVTRAHGNSGMVRAKFRSNLPAKAIGHRIRVMLYPSRI",
"MACARPLISVYSEKGESSGKNVTLPAVFKAPIRPDIVNFVHTNLRKNNRQPYAVSELAGHQTSAESWGTGRAVARIPRVRGGGTHRSGQGAFGNMCRGGRMFAPTKTWRRWHRRVNTTQKRYAICSALAASALPALVMSKGHCVEEVPELPLVVEDKVESYKKTKEAVQLLKKLKAWNDIKKVYASQRMRAGKGKMRNRRRIQRRGPCIIYNEDNGIIKAFRNIPGITLLNVSKLNILKLAPGGHVGRFCIWTESAFRKLDELYGTWRKAASLKSNYNLPMHKMMNTDLSRILKSPEIQRALRAPRKKIHRRVLKKNPLKNLRIMLKLNPYAKTMRRNTILRQARNHKLRVKKLEAAAAALAAKSEKIVPEKGAGDKKPAVGKKGKKPVDAKKLKKPAGKKVVTKKPAEKKPTTEEKKSAA",
"MPREDRATWKSNYFLKIIQLLDDYPKCFIVGADNVGSKQMQQIRMSLRGKAVVLMGKNTMMRKAIRGHLENNPALEKLLPHIRGNVGFVFTKEDLTEIRDMLLANKVPAAARAGAIAPCEVTVPAQNTGLGPEKTSFFQALGITTKISRGTIEILSDVQLIKTGDKVGASEATLLNMLNISPFSFGLIIQQVFDNGSIYSPEVLDITEQALHTRFLEGVRNVASVCLQIGYPTVASVPHSIINGYKRVLALSVETDYTFPLAEKVKAFLADPSAFAAAAPVAAATTAAPAAAAAPAKVEAKEESEESDEDMGFGLFD",
"MYSEWRSLHLVIQNDQGHTSVLHSYPESVGREVANAVVRPLGQALGHSPVSASQSLLYTDKDVKWTMEVICYGLTLPLDGETVKYCVDVYTDWIMALVLPKDSIPLPVIKEPNLYIQSILKHLQNLFVPRQEQGSSQIRLCLQVLRAIQKLARESSIMARETWEVLLLFLLQINDILLAPPTVQGGIAENLAEKLIGVLFEVWLLACTRCFPTPPYWKTAKEMVANWRHHPAVVEQWSKVICALTSRLLRFTYGPSFPPFKVPDEDANLIPPEMDNECIAQTWFRFLHMLSNPVDLSNPAVISSTPKFQEQFLNVSGMPQELSQYPCLKHLPQIFFRAMRGISCLVDAFLGISRPRSDSAPPTPVNRLSMPQSAAVNTTPPHNRRHRAVTVNKATMKTSTVTTAHTSKVQHQASSTSPLSSPNQTSSEPRPLPAPRRPKVNSILNLFGSWLFDAAFVHCKLHNGINRDSSMTASFIQILL<KEY>VRRKQKITDIVNKYRNKQLEPEFYTALFQEVGLKNCSS",
"MGFVKVVKNKAYFKRYQVRFRRRREGKTDYYARKRLVIQDKNKYNTPKYRMIVRVTNRDIICQIAYARIEGDMIVCAAYAHELPKYGVKVGLTNYAAAYCTGLLLARRLLNRFGMDKIYEGQVEVNGDEYNVESIDGQPGAFTCYLDAGLARTTTGNKVFGALKGAVDGGLSIPHSTKRFPGYDSESKEFNAEVHRKHIMGQNVADYMRYLMEEDEDAYKKQFSQYIKNNVTPDMMEEMYKKAHAAIRENPVYEKKPKREVKKKRWNRPKMSLAQKKDRVAQKKASFLRAQERAAES",
"MP<KEY>AKLVEAIRTNYNDRYDEIRRHWGGNVLGPKSVARIAKLEKAKAKELATKLG",
"MKTILSNQTVDIPENVDITLKGRTVIVKGPRGTLRRDFNHINVELSLLGKKKKRLRVDKWWGNRKELATVRTICSHVQNMIKGVTLGFRYKMRSVYAHFPINVVIQENGSLVEIRNFLGEKYIRRVRMRTGVACSVSQAQKDELILEGNDIELVSNSAALIQQATTVKNKDIRKFLDGIYVSEKGTVQQPDE",
"MPGWRLLAQGGAQVLGGGAGGLGAAPGLGSRKNILFVVRNLHSKSSTWWDEHLSEENVSFVKQLVSDENKAQLTSLLNPLKDEPWPLHPWEPGSSRVGLIALKLGMMPLWTKDGQKHAVTLLQVQDCHVLKYTPKEDHNGKTATLTVGGKTGSRLYKANSILEFYRDLGLPPKQTTKIFHVTDNAVIKQGTPLYAAHFRPGQYVDVTAKTIGKGFQGVMKRWGFKGQPASHGQTKTHRRPGAISTGDIARVWPGTKMPGKMGNQNRTVYGLKVWRVNTKHNIIYVNGSVGHRNCLVKIKDSTLPAYKDLGKSLPFPTYFPDGDEEELPEDLYDESVRQPSDPSITFA",
"MVFRRFVEVGRVAYISFGPHAGKLVAIVDVIDQNRALVDGPCTRVRRQAMPFKCMQLTDFILKFPHSARQKYVRKAWEKADINTKWAATRWAKKIDARERKAKMTDFDRFKVMKAKKMRNRIIKTEVKKLQRAALLKASPKKAAVAKAAIAAAAAAKAKVPAKKATGPGQKAAAQKASAQKAAGQKAAPPAKGQKGQKTPAQKAPAPKAAGKKA",
]
blast_db = BlastDb()
for seq in some_seqs:
blast_db.add_sequence(seq)
blast_db.get_sequences('QRL')
boolie = 'QRL' in 'QWEASDQRLASD'
logging.debug(f'boolie: {boolie}')
floati = 484.82
logging.debug(f'floati: {floati}')
inti = int(floati)
logging.debug(f'inti: {inti}')
rounded = round(floati)
logging.debug(f'rounded: {rounded}')
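    # Hedged sketch: exercise Blast.get_words with a uniform toy substitution
    # matrix (the tests would normally supply BLOSUM62).
    toy_sub = np.full((20, 20), -1, dtype=int)
    np.fill_diagonal(toy_sub, 5)
    blast = Blast(toy_sub)
    words = blast.get_words(sequence='MVQR', T=15)
    logging.debug(f'words for MVQR with T=15: {words}')  # only exact 3-mers reach 15 here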
if __name__ == '__main__':
main()
<file_sep>import numpy as np
import os
import json
import collections
import warnings
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].!
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
def json_data():
relative_path = os.path.dirname(__file__)
with open(os.path.join(relative_path, 'tests/pssm_test.json')) as json_file:
json_data = json.load(json_file)
return json_data
def bg_matrix(json_data):
return json_data['bg_matrix']
def msa_sequences(json_data):
return json_data['msa_sequences']
class MSA:
def __init__(self, sequences):
if sequences is None or len(sequences) == 0:
raise TypeError("Invalid MSA")
self.seq = sequences[0]
for sequence in sequences:
if not self.isValidSequence(sequence):
raise TypeError("Invalid MSA")
self.num_seq = len(sequences)
length = len(self.seq)
self.sequences = np.empty((0, length), dtype=object)  # object dtype for single-character strings
for sequence in sequences:
self.sequences = np.vstack((self.sequences, np.array(list(sequence), dtype=object)))
self.diffs = np.zeros(length, dtype=int)
self.counts = []
self.gaps = np.zeros(length, dtype=int)
self.seq_weights = None
self.observations = None
self.rel_pssm = None
for idx in range(0, length):
counts_in_col = collections.Counter(self.sequences[:, idx])
self.diffs[idx] = len(counts_in_col)
self.gaps[idx] = counts_in_col['-']
self.counts.append(counts_in_col)
def isValidSequence(self, sequence):
if len(sequence) != len(self.seq):
return False
return all(char in ALPHABET for char in sequence)
def pssm_without_gaps(self, log_pssm):
result = np.empty((0, 20), dtype=np.float64)
for idx, residue in enumerate(self.seq):
if residue != '-':
result = np.vstack((result, log_pssm[idx]))
return result
def pssm_log(self, rel_pssm):
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
log_pssm = 2 * np.log2(rel_pssm)
log_pssm[np.isneginf(log_pssm)] = -20
return log_pssm
def print_array(self, array, title):
if debug:
print(f"##############{title}##############")
for idx, row in enumerate(array):
print(idx, row, "sum:", sum(row), "gaps:", self.gaps[idx])
def initial_abs_and_normed(self):
length = len(self.seq)
abs = np.zeros((length, 20), dtype=np.float64)
normed = np.zeros((length, 20), dtype=np.float64)
for idx, counter in enumerate(self.counts):
for key in counter:
if key != '-':
abs[idx][AA_TO_INT[key]] = counter[key]
normed[idx] = abs[idx] / np.sum(abs[idx])
return (abs, normed)
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,
redistribute_gaps=False, add_pseudocounts=False):
if bg_matrix is None:
print("Initializing bg_matrix with 0.0025")
bg_matrix = np.full((20, 20), 0.0025, dtype=np.float64)
length = len(self.seq)
print("use_sequence_weights =", use_sequence_weights)
print("redistribute_gaps =", redistribute_gaps)
print("add_pseudocounts =", add_pseudocounts)
background_freq = np.zeros(20, dtype=np.float64)
for idx, row in enumerate(bg_matrix):
background_freq[idx] = np.sum(row)
gap_weights = self.gaps.copy()
# get abs and relative frequencies
abs, normed = self.initial_abs_and_normed()
# if necessary: use_sequence_weights
weighted = np.zeros(abs.shape)
if use_sequence_weights:
self.get_sequence_weights()
weighted_pssm = np.zeros((length, 21), dtype=np.float64)
for seq_idx, sequence in enumerate(self.sequences):
weight = self.seq_weights[seq_idx]
for idx, residue in enumerate(sequence):
weighted_pssm[idx][AA_TO_INT[residue]] += weight
abs = weighted_pssm[:, :20]
gap_weights = weighted_pssm[:,20]
weighted = abs.copy()
# sum = np.sum(abs, axis=1)
# normed = abs / sum[:, np.newaxis]
print("use_seq_wights", np.array_equal(abs, weighted))
# if necessary: redistribute gaps
if redistribute_gaps:
print(background_freq)
for idx, row in enumerate(abs):
weight = background_freq * gap_weights[idx]
# print(background_freq, "*", self.gaps[idx],"=")
# print("w", weight, "+")
# print("a", abs[idx], "=")
abs[idx] += weight
# print("r", abs[idx])
# sum = np.sum(abs[idx])
# normed[idx] = abs[idx] / sum
print("redistrib", np.array_equal(abs, weighted))
if add_pseudocounts:
absol = abs.copy()
# if use_sequence_weights:
# absol = weighted.copy()
pseudos = np.zeros(abs.shape)
adjusted = np.zeros(abs.shape)
alpha = self.get_number_of_observations() - 1
for i, row in enumerate(abs):
for c, val in enumerate(row):
value = 0
for j in range(0, 20):
value += absol[i][j] / background_freq[j] * bg_matrix[j][c]
pseudos[i, c] = value
for idx, absol_row in enumerate(absol):
# print(pseudos[idx])
adjusted[idx] = ((alpha * absol_row) + (beta * pseudos[idx])) / (alpha + beta)
abs = adjusted
# sum = np.sum(adjusted, axis=1)
# normed = adjusted / sum[:, np.newaxis]
# normalise
sum = np.sum(abs, axis=1)
normed = abs / sum[:, np.newaxis]
# divide by background frequencies
divided = np.zeros((length, 20), dtype=np.float64)
for idx, row in enumerate(normed):
divided[idx] = row / background_freq
# apply log with replacement of infinity
log_pssm = self.pssm_log(divided)
result = self.pssm_without_gaps(log_pssm)
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
# """
return np.rint(result).astype(np.int64)
def get_size(self):
return self.sequences.shape
def get_primary_sequence(self):
return self.seq.replace('-', '')
def get_sequence_weights(self):
if self.seq_weights is not None:
return self.seq_weights
sequences = self.sequences
length = len(self.seq)
weigth_to = np.zeros((self.num_seq, length), dtype=np.float64)
for idx_r in range(0, self.num_seq):
for idx_c in range(0, length):
if self.diffs[idx_c] != 1:
weigth_to[idx_r][idx_c] = 1 / (self.diffs[idx_c] * self.counts[idx_c][sequences[idx_r, idx_c]])
self.seq_weights = np.sum(weigth_to.T.copy(), axis=0)
return self.seq_weights
def get_number_of_observations(self):
if self.observations is not None:
return self.observations
self.observations = np.mean(self.diffs)
return self.observations
def create_bg():
bg = np.full((20,20), 0.002)
bg[AA_TO_INT['S']][AA_TO_INT['S']] = 0.01
bg[AA_TO_INT['A']][AA_TO_INT['S']] = 0.004
bg[AA_TO_INT['S']][AA_TO_INT['A']] = 0.004
return bg
debug = False
def main():
print("hello")
seqs_ = [
"SE-AN",
"SE-ES",
"SEVEN",
"SE-AS",
]
seqs_t = msa_sequences(json_data())
bg_ = bg_matrix(json_data())
bg = create_bg()
msa = MSA(seqs_t)
# print(msa.get_pssm(bg_matrix=bg_test, use_sequence_weights=True, redistribute_gaps= True, add_pseudocounts=True))
t111 = msa.get_pssm(bg_matrix=bg_, use_sequence_weights=True, redistribute_gaps=True, add_pseudocounts=True)
t101 = msa.get_pssm(bg_matrix=bg_, use_sequence_weights=True, redistribute_gaps=False, add_pseudocounts=True)
print("t111 equals t101", np.array_equal(t111, t101))
print(t111)
print(t101)
if __name__ == '__main__':
main()
<file_sep>import numpy as np
#import matrices_COPY
#blosum=matrices_COPY.MATRICES['blosum']
#test
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)  # np.int was removed in NumPy 1.24
self.align()
def appendUnique(self, firstList, secondList):
resultingList = firstList
for i in secondList:
if i not in resultingList:
resultingList.append(i)
return resultingList
# search from One path
def isFinalScorePath(self, scorePath):
#print("scorePath:", scorePath)
# if scorepath is empty
if len(scorePath) == 0:
return False
# take the last cell coordinates of the path
lastCell = scorePath[-1]
#print("last cell of path:", lastCell)
i = lastCell[0]
j = lastCell[1]
# test if left top cell
# Congratulations we have found a valid score path
# GLOBAL: if i == 0 and j == 0:
if self.score_matrix[i][j]==0:
return True
else:
return False
# search from One path
def getFinalScorePaths(self, scorePath):
finalScorePaths = []
pathStack = []
#print("getFinalScorePaths(", scorePath, ")")
# if scorepath is empty
if len(scorePath) == 0:
return [[[]]]
# Init the exploration stack with the starting cell
pathStack = scorePath.copy()
#print("Processing ", pathStack)
if self.isFinalScorePath(pathStack) == True:
#print("final path found:", pathStack)
#path ends with (0,0)
newPaths = [pathStack.copy()]
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
#print("finalScorePaths", finalScorePaths)
else:
# otherwise not a viable path
# try to explore 3 neighboors
# cell other than (0,0)
# take the last cell coordinates of the path
startingCell = scorePath[-1]
i = startingCell[0]
j = startingCell[1]
# horizontal
if i > 0 and self.score_matrix[i-1][j]+self.gap_penalty==self.score_matrix[i][j]:
#print("")
#print("horizontal")
nextCell = [i-1, j]
pathStack.append(nextCell)
newPaths = self.getFinalScorePaths(pathStack)
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
pathStack.pop()
# diagonal
current_score=self.score_matrix[i][j]
diag_score=self.score_matrix[i-1][j-1]
if current_score==diag_score+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]]:
precedent=True
else:
precedent=False
if i > 0 and j > 0 and precedent:
#print("")
#print("diagonal")
nextCell = [i-1, j-1]
pathStack.append(nextCell)
newPaths = self.getFinalScorePaths(pathStack)
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
pathStack.pop()
# vertical
if j > 0 and self.score_matrix[i][j-1]+self.gap_penalty==self.score_matrix[i][j]:
#print("")
#print("vertical")
nextCell = [i, j-1]
pathStack.append(nextCell)
newPaths = self.getFinalScorePaths(pathStack)
finalScorePaths = self.appendUnique(finalScorePaths, newPaths)
pathStack.pop()
return finalScorePaths
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
##### Score matrix #####
S=np.zeros((len(self.string2) + 1, len(self.string1) + 1), dtype=int) # score matrix
# to be constructed step by step and then score_matrix <- S
# One pass over the table suffices for Smith-Waterman: each cell depends only
# on its top, left and top-left neighbours. The original extra outer loop
# recomputed identical values.
for i in range(1, len(self.string2)+1):
for j in range(1, len(self.string1)+1):
S[i,j]=max(0, S[i-1][j-1]+self.substitution_matrix[self.string2[i-1]][self.string1[j-1]], S[i-1][j]+self.gap_penalty, S[i][j-1]+self.gap_penalty)
print(S)
self.score_matrix=S
# find one local alignment
##### Finding Valid Score Paths #####
if np.amax(self.score_matrix)==0:
print('No local alignment found')
return []
# starting point
(start_i, start_j)=np.unravel_index(np.argmax(self.score_matrix, axis=None), self.score_matrix.shape)
# find path
scorePath = [[start_i, start_j]]
finalScorePaths = self.getFinalScorePaths(scorePath)
#print(finalScorePaths)
return finalScorePaths
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return len(self.align())>0
def conversion(self):
path=self.align()[0]
path.reverse()
[start_i, start_j]=path[0]
[stop_i, stop_j]=path[-1]
# string1
head1=self.string1[0:start_j]
tail1=self.string1[stop_j:]
#string2
head2=self.string2[0:start_i]
tail2=self.string2[stop_i:]
# In between : alignment
seq1=''
seq2=''
for k in range(len(path)):
# attention : coeff (i, j ) in score_matrix corresponds
# to letters i+1 in string2, j+1 in string1
[i,j]=path[k] # k-th step
if k>0:
# diagonal step
if path[k-1][0]==i-1 and path[k-1][1]==j-1:
letter2=self.string2[i-1]
letter1=self.string1[j-1]
seq2+=letter2
seq1+=letter1
# horizontal step
if path[k-1][0]==i and path[k-1][1]==j-1:
# add gap in string2
letter2='-'
letter1=self.string1[j-1]
seq2+=letter2
seq1+=letter1
# vertical step
if path[k-1][0]==i-1 and path[k-1][1]==j:
# add gap in string1
letter2=self.string2[i-1]
letter1='-'
seq2+=letter2
seq1+=letter1
sequence1=head1+seq1+tail1
sequence2=head2+seq2+tail2
return (sequence1, sequence2)
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if not self.has_alignment():
return ('','')
else:
return self.conversion()
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
:return: True if the residue with a given index in a given string has been aligned,
False otherwise
"""
return False  # stub from the template: residue bookkeeping is not implemented, so this always reports False
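if __name__ == '__main__':
    # Hedged usage sketch with a small inline scoring scheme instead of the
    # course-provided BLOSUM matrix.
    ident = {a: {b: (3 if a == b else -2) for b in 'ACDNR'} for a in 'ACDNR'}
    la = LocalAlignment('ARNDC', 'RND', -4, ident)
    print(la.has_alignment())  # True: 'RND' matches inside 'ARNDC'
    print(la.get_alignment())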
<file_sep># -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio import SeqIO # Tip: This module might be useful for parsing...
### My content
p='/Users/eva/Projects/ProtPred/pp1ss19exercise2-exercise-ge46sec/tests/P09616.xml'
# Test
############ Exercise 3: SwissProt ##########
class SwissProt_Parser:
PARSER = SeqIO
def __init__( self, path, frmt='uniprot-xml' ):
'''
Initialize every SwissProt_Parser with a path to a XML-formatted UniProt file.
An example file is included in the repository (P09616.xml).
Tip: Store the parsed XML entry in an object variable instead of parsing it
again & again ...
'''
self.sp_anno = SeqIO.parse(path,frmt)
self.record=next(self.sp_anno)
# is it saved ? why can't I visualize the parsed data ?
# swiss.sp_anno => AttributeError: type object 'SwissProt_Parser' has no attribute 'sp_anno'
# 3.2 SwissProt Identifiers
def get_sp_identifier( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Unique SwissProt identifier for the given xml file
'''
# example: <name>HLA_STAAU</name>
identifier = self.record.id
return identifier
# 3.3 SwissProt Sequence length
def get_sp_sequence_length( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return sequence length of the UniProt entry as an integer.
'''
# example : <sequence length="319" mass="35904" checksum="6711C415DF7EBF30" modified="1992-12-01" version="2" precursor="true">
seq_len = len(self.record.seq)
return seq_len
# 3.4 Organism
def get_organism( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return the name of the organsim as stated in the corresponding field
of the XML data. Return value has to be a string.
'''
# example:
# <organism>
# <name type="scientific">Staphylococcus aureus</name>
organism = self.record.annotations['organism']
return organism
# 3.5 Localizations
def get_localization( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Return the name of the subcellular localization as stated in the
corresponding field.
Return value has to be a list of strings.
'''
"""
From example: (?) field "comment_subcellularlocation_location"
<comment type="subcellular location">
<subcellularLocation>
<location>Secreted</location>
</subcellularLocation>
<text>Secreted as a monomer. After oligomerization and pore formation, the complex is translocated across the bilayer, probably via the Gly-rich domain of each strand.</text>
Answer : Secreted
"""
localization = self.record.annotations['comment_subcellularlocation_location']
return localization
# 3.6 Cross-references to PDB
def get_pdb_support( self ):
'''
Input:
self: Use XML entry which has been parsed & saved during object initialization
Return:
Returns a list of all PDB IDs which support the annotation of the
given SwissProt XML file. Return the PDB IDs as list.
'''
"""
Example : <dbReference type="PubMed" id="1400487"/> => get this id
<dbReference type="PubMed" id="1587866"/> idem
etc
<dbReference type="PubMed" id="8188346"/> get id
return list of ids
"""
pdb_ids = []
for i in self.record.dbxrefs:
if 'PDB:' in i:
pdb_ids.append(i[4:])
return pdb_ids
def main():
print('SwissProt XML Parser class')
return None
if __name__ == '__main__':
main()
# for i in swiss.record.dbxrefs:
# if 'PubMed' in i:
# print(i[7:len(i)])
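# Hedged usage sketch (commented out so the module imports cleanly when the
# example file is absent); values follow the P09616 entry quoted above.
# parser = SwissProt_Parser('tests/P09616.xml')
# print(parser.get_sp_identifier())       # 'P09616'
# print(parser.get_sp_sequence_length())  # 319
# print(parser.get_localization())        # ['Secreted']
# print(parser.get_pdb_support())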
<file_sep>import re
from typing import List
import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = "ACDEFGHIKLMNPQRSTVWY"
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences_to_words = {}
self.sequences = []
self.words_to_sequences = {}
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
words = set(sequence[i : i + 3] for i in range(len(sequence) - 3 + 1))
self.sequences_to_words[sequence] = words
self.sequences.append(sequence)
for word in words:
try:
self.words_to_sequences[word].append(sequence)
except KeyError:
self.words_to_sequences.setdefault(word, [sequence])
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
return self.words_to_sequences[word]
@staticmethod
def _lists_to_len(l: List[list]) -> List[int]:
return list(map(lambda x: len(x), l))
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
:return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
words_per_sequence = list(
map(lambda sequence: len(self.sequences_to_words[sequence]), self.sequences)
)
sequences_per_word = list(
map(lambda sequences: len(sequences), self.words_to_sequences.values())
)
return (
len(self.sequences),
len(self.words_to_sequences),
int(np.average(words_per_sequence) + 0.5),
int(np.average(sequences_per_word) + 0.5),
)
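    # Minimal usage sketch (illustrative sequences only):
    #     db = BlastDb()
    #     db.add_sequence("MGPRARPAFL")
    #     db.add_sequence("ARPAFLQTAV")
    #     db.get_sequences("ARP")   # -> both sequences, each contains the word 'ARP'
    #     db.get_db_stats()         # -> (2, n_words, avg_words_per_seq, avg_seqs_per_word)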
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix = substitution_matrix
def _compare_words(self, word, word_2):
value = 0
for i, j in zip(word, word_2):
value += self.substitution_matrix[AA_TO_INT[i], AA_TO_INT[j]]
return value
def _compare_words_pssm(self, pssm_line, word_2):
value = 0
for i, j in zip(pssm_line, word_2):
value += i[AA_TO_INT[j]]
return value
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
possible_words = list(
{i + j + k for i in ALPHABET for j in ALPHABET for k in ALPHABET}
)
unique_words = []
if sequence:
possible_words = list(
filter(
lambda word: self._compare_words(word, word) >= T, possible_words
)
)
for i in range(len(sequence) - 3 + 1):
word_2 = sequence[i : i + 3]
remove_words = []
for word in possible_words:
if self._compare_words(word, word_2) >= T:
unique_words.append(word)
remove_words.append(word)
for word in remove_words:
possible_words.remove(word)
else:
for i in range(len(pssm) - 3 + 1):
remove_words = []
for word in possible_words:
if self._compare_words_pssm(pssm[i : i + 3], word) >= T:
unique_words.append(word)
remove_words.append(word)
for word in remove_words:
possible_words.remove(word)
return unique_words
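    # Illustrative sketch with a hypothetical identity-style matrix (not a real BLOSUM):
    #     sub = np.full((20, 20), -1, dtype=np.int64)
    #     np.fill_diagonal(sub, 5)
    #     Blast(sub).get_words(sequence="MGPR", T=15)
    #     # -> only the exact 3-mers 'MGP' and 'GPR' reach 3 * 5 = 15;
    #     #    any single mismatch scores 5 + 5 - 1 = 9 < 15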
def _find_positions_in_query(self, query, word, T):
positions = []
for i in range(len(query) - 3 + 1):
word_2 = query[i : i + 3]
if self._compare_words(word, word_2) >= T:
positions += [i]
return positions
def _find_positions_in_pssm(self, pssm, word, T):
positions = []
for i in range(len(pssm) - 3 + 1):
pssm_line = pssm[i : i + 3]
if self._compare_words_pssm(pssm_line, word) >= T:
positions += [i]
return positions
def search_one_hit(
self, blast_db: BlastDb, *, query=None, pssm=None, T=13, X=5, S=30
):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
unique_words = self.get_words(sequence=query, pssm=pssm, T=T)
d = dict()
if query:
# return d
for word in unique_words:
regex = re.compile(f"(?=({word}))")
query_positions = self._find_positions_in_query(query, word, T)
try:
target_sequences = blast_db.get_sequences(word)
except KeyError:
continue
for query_position in query_positions:
for target in target_sequences:
target_iterator = re.finditer(regex, target)
for target_match in target_iterator:
hsp = self._get_HSP(
query,
target,
query_position,
target_match.start(),
X,
self._compare_words,
)
if hsp[3] >= S:
try:
                                    # self._only_keep_shortest_scoring_hsp(d[target], hsp)
d[target].add(hsp)
except KeyError:
d[target] = {hsp}
else:
for word in unique_words:
regex = re.compile(f"(?=({word}))")
query_positions = self._find_positions_in_pssm(pssm, word, T)
try:
target_sequences = blast_db.get_sequences(word)
except KeyError:
continue
for query_position in query_positions:
for target in target_sequences:
target_iterator = re.finditer(regex, target)
for target_match in target_iterator:
hsp = self._get_HSP(
pssm,
target,
query_position,
target_match.start(),
X,
self._compare_words_pssm,
)
if hsp[3] >= S:
try:
                                    # self._only_keep_shortest_scoring_hsp(d[target], hsp)
d[target].add(hsp)
except KeyError:
d[target] = {hsp}
return d
    def _only_keep_shortest_highest_scoring_hsp(self, hsp_set: set, hsp):
# what if there are multiple to remove?
        for hsp_ in list(hsp_set):  # iterate over a copy, the set is mutated below
# they intersect
if hsp_[0] <= hsp[0] <= hsp_[0] + hsp_[2]:
# same score take shortest one
if hsp_[3] == hsp[3]:
if hsp_[2] > hsp[2]:
hsp_set.remove(hsp_)
hsp_set.add(hsp)
else:
continue
if hsp_[3] < hsp[3]:
hsp_set.remove(hsp_)
hsp_set.add(hsp)
else:
continue
    def _only_keep_shortest_scoring_hsp(self, hsp_set: set, hsp):
# what if there are multiple to remove?
        for hsp_ in list(hsp_set):  # iterate over a copy, the set is mutated below
# they start or end on the same thing and have the same score
if (
(hsp_[3] == hsp[3])
and (hsp_[0] == hsp[0] and hsp_[1] == hsp[1])
or (
hsp[0] + hsp[2] == hsp_[0] + hsp_[2]
and hsp[1] + hsp[2] == hsp_[1] + hsp_[2]
)
):
# take shortest one
if hsp_[2] > hsp[2]:
hsp_set.remove(hsp_)
hsp_set.add(hsp)
return
else:
return
# apparently no collision, so add it
hsp_set.add(hsp)
def _get_HSP(
self, query, target, query_position, target_position, X, compare_function
):
        # extend to the right (TODO: check whether the right extension should instead start at query_position + 2)
query_position_ = query_position + 3
target_position_ = target_position + 3
highest_right, highest_idx_right = self._extend_sequence(
query=query,
target=target,
query_position=query_position_,
target_position=target_position_,
X=X,
compare_function=compare_function,
idx_manipulation=+1,
)
# because we start one more right
highest_idx_right += 1
# left
query_position_ = query_position - 1
target_position_ = target_position - 1
highest_left, highest_idx_left = self._extend_sequence(
query=query,
target=target,
query_position=query_position_,
target_position=target_position_,
X=X,
compare_function=compare_function,
idx_manipulation=-1,
)
# because we start one more left
highest_idx_left -= 1
word_score = compare_function(
query[query_position : query_position + 3],
target[target_position : target_position + 3],
)
return (
query_position + highest_idx_left,
target_position + highest_idx_left,
highest_idx_right - highest_idx_left + 3, # 3 for the word length
highest_right + highest_left + word_score,
)
def _extend_sequence(
self,
query,
target,
query_position,
target_position,
X,
compare_function,
idx_manipulation=+1,
):
highest = current = 0
highest_idx = current_idx = 0
value_changed = False
if not (
0 <= query_position + current_idx < len(query)
and 0 <= target_position + current_idx < len(target)
):
return highest, -idx_manipulation
while highest - current < X and (
0 <= query_position + current_idx < len(query)
and 0 <= target_position + current_idx < len(target)
):
query_letter_or_line = query[query_position + current_idx]
target_letter = target[target_position + current_idx]
if isinstance(query_letter_or_line, np.ndarray):
query_letter_or_line = [query_letter_or_line]
value = compare_function(query_letter_or_line, target_letter)
current += value
if current > highest:
highest = current
highest_idx = current_idx
value_changed = True
current_idx += idx_manipulation
if value_changed:
return highest, highest_idx
else:
return highest, -idx_manipulation
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        d = dict()
        # NOTE: the two-hit strategy is not implemented here; this is a placeholder result.
        d["SEQWENCE"] = [(1, 2, 4, 13)]
        return d
<file_sep>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 17:36:39 2019
@author: eva
"""
### Check if the input is a DNA sequence
def check_input(seq):
if type(seq)!=str:
raise TypeError('This is not a string')
else:
elements=set(seq)
        if elements.issubset({'A','C','T','G'}): # a valid DNA string need not contain all four bases
print('Valid input')
return True
else:
raise TypeError('Unknown elements in the string')
### Change index: don't take M into account / ATG
### Rotate DNA => all possible starting points in one direction
def generate_all(seq):
"""
Input: DNA strand, string
Output : all possible strands based on rotation of the sequence, list of strings
"""
n=len(seq)
all_seq=[]
for i in range(n):
s=seq[i:n]+seq[0:i]
all_seq.append(s)
return all_seq
def only_triplets(seq):
"""
Input: One DNA sequence, already rotated, string
    Output : the same sequence with the last one or two nucleotides truncated if needed, string
    NB: we only want to keep complete triplets, i.e. codons, for further analysis
"""
return seq[0:len(seq)-len(seq)%3]
### Cut the DNA sequence into 6 frames
def frames(seq):
"""
input : DNA sequence
output : the 3 possible frames for one reading direction
Note : must take into account circular DNA
"""
frames=[]
# we must get a valid frame with triplets only
l=len(seq)
# straight forward, first frame
seq1=seq[0:l-l%3]
    # second frame, starting from the second nucleotide, wrapping around to
    # the beginning of the sequence (circular DNA case)
    seq2=seq[1:l]+seq[0]+seq[1]
    seq2=seq2[0:l-l%3]
    # third frame, built on the same principle
    seq3=seq[2:l]+seq[0]+seq[1]
    seq3=seq3[0:l-l%3]
frames=[seq1, seq2, seq3]
return frames
### Reversed complementary sequence
def rev_comp(seq):
"""
Input: DNA sequence, string
Output: reversed complementary sequence, string
"""
dic={'A':'T','C':'G','T':'A','G':'C'}
comp='' #complementary sequence
for c in seq:
comp+=dic[c]
rev=comp[::-1]
return rev
# Note : for the 3 other frames, apply the "frames" function to rev_comp(DNA)
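# e.g. rev_comp("ATGC") -> "GCAT" (complement "TACG", then reversed)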
### Only keep the proteins longer than 33 amino acids
def filter33aa(results):
"""
input: list of tuples (start, stop, aa, rev) taking into account each frame
output: list of tuples if they respect the conditions
TO DO : find all ORFs encoding proteins longer than 33, i.e.
the resulting polypeptide has at least 34 amino acids,
in all six reading frames (primary and reverse-complementary strand)"""
results33=[]
for (start, stop, aa, rev) in results:
if len(aa)>33:
results33.append((start, stop, aa, rev))
return results33
### Convert a DNA sequence into an amino-acid sequence
# Genetic code dictionary
codon_dict={
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'', 'TAG':'',
'TGC':'C', 'TGT':'C', 'TGA':'', 'TGG':'W'}
# From a codons sequence to an amino acids sequence
def codons_to_aa(ORF):
"""
Input : ORF is a DNA/RNA sequence that precisely codes for a protein,
i. e. no leading or trailing untranslated DNA/RNA residues.sequence, string
Output: AA = Corresponding Amino-acids sequence, string
Uses codon_dict for DNA->(RNA->)AA
"""
l=len(ORF)
if l%3 !=0:
return 'Invalid ORF length'
    n=l//3 # number of triplets
# Here we should have l a multiple of 3 because we handle an ORF
AA=''
for i in range(n):
codon=ORF[3*i:3*(i+1)]
# print(codon)
aa=codon_dict[codon]
AA+=aa
return AA
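# e.g. codons_to_aa("ATGGCCTAA") -> "MA" (stop codons map to '' in codon_dict)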
### Detect start and stop codons
def detection(seq, flag):
"""
    input: seq = a valid reading frame, i.e. only triplets, string
flag = True if the ORF is parsed from the reversed strand, Boolean
Note : this function is just for one frame, which has already been computed in a previous step
output: if valid frame (start and stop codons found), returns a 4-tuples containing (in this order)
the position of the first DNA residue, the position of the last DNA residue (including stop codon),
the translated amino acid sequence as a single string, and the original flag
if no start and stop codons found, returns False
Note : Positions start at index 0 (first nucleotide in the primary strand)
and ORFs in the reverse-complementary strand must be indexed according to their position on the primary strand
(i.e. start > stop except if they are circular)
Example: (591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
"""
codons=[seq[3*i:3*(i+1)] for i in range(len(seq)//3)]
# print(codons)
is_started=False
codon_indexes=[]
ORFs=[]
for i in range(len(codons)):
c=codons[i]
# print(c,i)
# print('before check: is started is ' + str(is_started))
# print('start'+str(c == 'ATG'and is_started==False))
# print('stop'+str(c in ['TAG', 'TAA','TGA'] and is_started==True))
        if c == 'ATG' and is_started==False: # we only take into account the first start codon
is_started=True
start=i # index of the start codon in the triplet sequence
# print('start!!!')
if c in ['TAG', 'TAA','TGA'] and is_started==True:
stop=i
# print('stop!!!')
is_started=False
codon_indexes.append((start, stop))# index of the stop codon in the triplet sequence
# print('append performed_________')
# print(codon_indexes)
# if we have found an ORF
for (i,j) in codon_indexes:
# print((i,j))
nuc_start=i*3
nuc_stop=j*3+2
# index of the first nucleotide of the sequence, including start codon
# index of the last nucleotide of the stop codon, including stop codon
orf=seq[nuc_start:nuc_stop+1]
# convert the nucleotide sequence into an AA sequence
aa=codons_to_aa(orf)
ORFs.append((nuc_start, nuc_stop, aa, flag))
# print('and now?')
return ORFs
# def getKey(item):
# return item[1]
# candidates=sorted(ORFs, key=getKey)
def longest(candidates):
Stop_dic={} # stop:orf
for (start, stop, aa, flag) in candidates:
if stop in Stop_dic:
if len(aa)>len(Stop_dic[stop][2]):
Stop_dic[stop]=(start, stop, aa, flag)
else:
Stop_dic[stop]=(start, stop, aa, flag)
return list(Stop_dic.values())
### Getting everything together
# actual function : get_orfs
def get_orfs(DNA):
"""
input: DNA sequence, string
output: if found, the ORFs in the following format (start, stop, aa, flag)
the return value is a list of ORFs represented as 4-tuples containing (in this order)
the position of the first DNA residue, the position of the last DNA residue (including stop codon),
the translated amino acid sequence as a single string, and a flag
which is True if the ORF is parsed from the reversed strand.
!!! Positions start at index 0 (first nucleotide in the primary strand) and
ORFs in the reverse-complementary strand must be indexed according to their position on the primary strand
(i.e. start > stop except if they are circular)
Example: (591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
Note: index issue => if circular DNA and start=l-1 <- 0; start=l-2 <- 1
+ Must reverse the indices for the reverse sequence ???
"""
n=len(DNA)
# check if the input is DNA
if check_input(DNA)==True:
pass
# get all possible sequences:
S0=generate_all(DNA) # flag=False
S1=generate_all(rev_comp(DNA))
results=[] # empty list for results
# Direct reading
for i in range(len(S0)):
s=S0[i] # take a sequence
        s=only_triplets(s) # truncate the last one or two nucleotides if needed
flag=False # flag is False because it is the direct reading direction
ORFs=detection(s, flag)
for (start, stop, aa, flag) in ORFs:
if start in range(0,n-i):
start=start+i
else:
start=start-(n-i)
if stop in range(0,n-i):
stop=stop+i
else:
stop=stop-(n-i)
if start in range(n) and stop in range(n):
results.append((start, stop, aa, flag))
# results.append((start, stop, aa, flag))
# Reversed complementary sequence
for i in range(len(S1)):
s=S1[i]
        s=only_triplets(s) # truncate the last one or two nucleotides if needed
flag=True
ORFs=detection(s, flag)
for (start, stop, aa, flag) in ORFs:
if start in range(0,n-i):
start=start+i
else:
start=start-(n-i)
if stop in range(0,n-i):
stop=stop+i
else:
stop=stop-(n-i)
start=(n-1)-start
stop=(n-1)-stop
# results.append((start, stop, aa, flag))
if start in range(n) and stop in range(n):
results.append((start, stop, aa, flag))
results=list(set(results))
    results=longest(results)
# print(results)
return filter33aa(results)
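# Hedged usage sketch (toy circular genome; the exact tuples depend on the
# rotation handling above):
#     get_orfs("ATG" + "GCT" * 40 + "TAA")
#     # -> expected [(0, 125, 'M' + 'A' * 40, False)]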
def old_get_orfs(DNA):
"""
input: DNA sequence, string
output: if found, the ORFs in the following format (start, stop, aa, flag)
the return value is a list of ORFs represented as 4-tuples containing (in this order)
the position of the first DNA residue, the position of the last DNA residue (including stop codon),
the translated amino acid sequence as a single string, and a flag
which is True if the ORF is parsed from the reversed strand.
!!! Positions start at index 0 (first nucleotide in the primary strand) and
ORFs in the reverse-complementary strand must be indexed according to their position on the primary strand
(i.e. start > stop except if they are circular)
Example: (591, 704, 'MNFAKLMVRCIHMRILTMKKLMNGILIFICLHFWMIG', False)
Note: index issue => if circular DNA and start=l-1 <- 0; start=l-2 <- 1
+ Must reverse the indices for the reverse sequence ???
"""
l=len(DNA)
# check if the input is DNA
if check_input(DNA)==True:
pass
# 3 frames in the first direction
f1=frames(DNA)
# 3 frames in the other direction
f2=frames(rev_comp(DNA))
# print(f1,f2)
results=[] # empty list for results
# Direct reading
for i in range(len(f1)):
f=f1[i] # take a frame
flag=False # flag is False because it is the direct reading direction
if detection(f,flag)==False:
pass # no valid sequence found
else:
(start, stop)=detection(f, flag)[0:2]
# handling the indices in the case of circular DNA
if i==1 and stop==l-1:
stop=0
elif i==2 and stop==l-1:
stop=0
elif i==2 and stop==l-2:
stop=1
results.append(detection(f,flag))
# Reversed complementary sequence
for i in range(len(f2)):
f=f2[i]
flag=True
if detection(f,flag)==False:
pass # no valid sequence detected
else:
(start, stop)=detection(f, flag)[0:2]
# we must reverse the indices
# ORFs in the reverse-complementary strand must be indexed according to their position
# on the primary strand, (i.e. start > stop except if they are circular)
start=(l-1)-start
stop=(l-1)-stop
# not sure here
if i==1 and stop==l-1:
stop=0
elif i==2 and stop==l-1:
stop=0
elif i==2 and stop==l-2:
stop=1
results.append(detection(f,flag))
print(results)
    return filter33aa(results)
# issues : several times the same (valid) output printed ???
# indexes ?
# handling of circular DNA ?
<file_sep>##############
# Exercise 1.5
##############
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
# number of proteins should be number of tuples in the list
return len(self.__sequences)
def get_average_length(self):
average = 0
for sequence in self.__sequences:
average += len(sequence[1]) # add up all sequence lengths
# divide by the number of proteins
return float(average)/float(self.get_counts())
def read_fasta(self, path):
file = open(path, "r")
fasta_header = ''
fasta_data = ''
while True:
line = file.readline()
if line == '':
self.__sequences.append((fasta_header, fasta_data)) # add data to list
break
elif line[0] == '>': # check if line starts with >
fasta_header += line[:len(line)-1]
elif line != '\n':
if line[-2:] == '*\n':
# remove * if there is one
fasta_data += line[:len(line)-2]
else:
fasta_data += line[:len(line)-1]
elif line == '\n':
self.__sequences.append((fasta_header, fasta_data)) # add data to list
fasta_header = ''
fasta_data = ''
return self.__sequences
def get_abs_frequencies(self):
        # return the number of occurrences, not normalized by length
amino_acids_dist = {
'F': 0,
'L': 0,
'I': 0,
'M': 0,
'V': 0,
'S': 0,
'P': 0,
'T': 0,
'A': 0,
'Y': 0,
'H': 0,
'Q': 0,
'N': 0,
'K': 0,
'D': 0,
'E': 0,
'C': 0,
'W': 0,
'R': 0,
'G': 0
}
for sequence in self.__sequences: # loop over all proteins
for amino_acid in amino_acids_dist.keys(): # loop over all amino acids
amino_acids_dist[amino_acid] += sequence[1].upper().count(amino_acid)
return amino_acids_dist
def get_av_frequencies(self):
        # return the number of occurrences, normalized by length
amino_acids_dist_avg = {
'F': 0.0,
'L': 0.0,
'I': 0.0,
'M': 0.0,
'V': 0.0,
'S': 0.0,
'P': 0.0,
'T': 0.0,
'A': 0.0,
'Y': 0.0,
'H': 0.0,
'Q': 0.0,
'N': 0.0,
'K': 0.0,
'D': 0.0,
'E': 0.0,
'C': 0.0,
'W': 0.0,
'R': 0.0,
'G': 0.0
}
# get the absolute distribution of amino acids
amino_acids_dist = self.get_abs_frequencies()
total_length = 0
for sequence in self.__sequences:
total_length += len(sequence[1]) # length of all proteins combined
for amino_acid in amino_acids_dist.keys():
# divide them by the number of amino acids in all proteins
amino_acids_dist_avg[amino_acid] = float(
amino_acids_dist[amino_acid])/float(total_length)
return amino_acids_dist_avg
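if __name__ == '__main__':
    # Minimal usage sketch; 'tests.fasta' is a placeholder path, adjust as needed.
    dist = AADist('tests.fasta')
    print(dist.get_counts())
    print(dist.get_average_length())
    print(dist.get_av_frequencies())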
<file_sep>import numpy as np
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        #print(self.substitution_matrix["A"]["A"])
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
#compute score matrix
#print(self.score_matrix)
#print(self.score_matrix[1][2])
        ## fill first row and column
        # [row][column]
gp=0
for i in range(len(self.string2)+1):
self.score_matrix[i][0]=gp
gp=gp+self.gap_penalty
gp=0
for i in range(len(self.string1)+1):
self.score_matrix[0][i]=gp
gp=gp+self.gap_penalty
##fill rest
for i in range(1,len(self.string2)+1):
for y in range (1,len(self.string1)+1):
self.score_matrix[i][y]=self.get_max_alg(i,y)
#print(self.score_matrix)
def get_max_alg(self,i,y):
        diag=self.score_matrix[i-1,y-1]+self.substitution_matrix[self.string2[i-1]][self.string1[y-1]]
left=self.score_matrix[i,y-1]+self.gap_penalty
up=self.score_matrix[i-1,y]+self.gap_penalty
return max(diag,left,up)
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
#return max(self.score_matrix[len(self.string2)][len(self.string1)],self.score_matrix[len(self.string2)][len(self.string1)],self.score_matrix[len(self.string2)][len(self.string1)])
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return self.get_amount_of_pred(len(self.string2),len(self.string1))
def get_amount_of_pred(self,i,y):
#ende
if i<1 or y<1:
return 0
if i==1 and y==1:
return 1
current_score=self.score_matrix[i][y]
up=self.score_matrix[i-1][y]+self.gap_penalty
        diag=self.score_matrix[i-1][y-1]+self.substitution_matrix[self.string2[i-1]][self.string1[y-1]]
left=self.score_matrix[i][y-1]+self.gap_penalty
out=0
if up==current_score:
out=out+self.get_amount_of_pred(i-1,y)
if diag==current_score:
out=out+self.get_amount_of_pred(i-1,y-1)
if left==current_score:
out=out+self.get_amount_of_pred(i,y-1)
return out
def get_ali_rec(self,i,y):
#ende
if i<1 or y<1:
return 0
if i==1 and y==1:
return 1
current_score=self.score_matrix[i][y]
up=self.score_matrix[i-1][y]+self.gap_penalty
        diag=self.score_matrix[i-1][y-1]+self.substitution_matrix[self.string2[i-1]][self.string1[y-1]]
left=self.score_matrix[i][y-1]+self.gap_penalty
out=0
if up==current_score:
out=self.get_ali_rec(i-1,y)
if out==1:
self.trace_matrix[i-1][y]=1
if diag==current_score:
out=self.get_ali_rec(i-1,y-1)
if out==1:
self.trace_matrix[i-1][y-1]=1
if left==current_score:
out=self.get_ali_rec(i,y-1)
if out==1:
self.trace_matrix[i][y-1]=1
return out
    # TODO: handle gaps at the very beginning of the alignment?
def get_next_char(self,i,y,ali):
#ende
if i<1 or y<1:
return ""
string2=ali[0]
string1=ali[1]
if i==1 and y==1:
string2=string2+self.string2[0]
string1=string1+self.string1[0]
ali=(string1[::-1],string2[::-1])
#print(ali)
#print((self.string2,self.string1))
self.alignments.append((string1[::-1],string2[::-1]))
current_score=self.score_matrix[i][y]
up=self.score_matrix[i-1][y]+self.gap_penalty
        diag=self.score_matrix[i-1][y-1]+self.substitution_matrix[self.string2[i-1]][self.string1[y-1]]
left=self.score_matrix[i][y-1]+self.gap_penalty
out=0
temp=[]
if up==current_score:
#self.get_next_char(i-1,y,(string2+"-",string1+self.string1[y-1]))
self.get_next_char(i-1,y,(string2+self.string2[i-1],string1+"-"))
if diag==current_score:
self.get_next_char(i-1,y-1,(string2+self.string2[i-1],string1+self.string1[y-1]))
if left==current_score:
#self.get_next_char(i,y-1,(string2+self.string2[i-1],string1+"-"))
self.get_next_char(i,y-1,(string2+"-",string1+self.string1[y-1]))
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
return [
('ADMI-NS', 'ADMIRES'), ('ADMIN-S', 'ADMIRES')
]
"""
#print(self.score_matrix)
        self.trace_matrix = np.zeros((len(self.string2) + 1, len(self.string1) + 1), dtype=int)
#self.get_ali_rec(len(self.string2),len(self.string1))
#print(self.trace_matrix)
self.alignments=[]
self.get_next_char(len(self.string2),len(self.string1),("",""))
print(self.alignments)
return self.alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
return [
[0, -1, -2, -3, -4, -5, -6],
[-1, 1, 0, -1, -2, -3, -4],
[-2, 0, 2, 1, 0, -1, -2],
[-3, -1, 1, 3, 2, 1, 0],
[-4, -2, 0, 2, 4, 3, 2],
[-5, -3, -1, 1, 3, 4, 3],
[-6, -4, -2, 0, 2, 3, 4],
[-7, -5, -3, -1, 1, 2, 4]
]
"""
return self.score_matrix
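if __name__ == '__main__':
    # Minimal sketch with a hypothetical match/mismatch matrix (not a real BLOSUM):
    alphabet = 'ACGT'
    toy_matrix = {a: {b: (1 if a == b else -1) for b in alphabet} for a in alphabet}
    ga = GlobalAlignment('ACGT', 'AGT', -2, toy_matrix)
    print(ga.get_best_score())   # 1
    print(ga.get_alignments())   # [('ACGT', 'A-GT')]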
<file_sep>from pathlib import Path
from functools import reduce
from itertools import product
import re
from operator import itemgetter
from timeit import default_timer as timer
import numpy as np
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
# all possible words
WORDS = [''.join(word) for word in product(ALPHABET, repeat=3)]
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
def _word_pairs_with_distance(words_with_positions: list, A: int) -> set:
"""
Create every word pair which do not overlap and has a distance <= A
"""
word_pairs = set()
for (word1, loc1), (word2, loc2) in product(words_with_positions, repeat=2):
# only consider cases where loc1 < loc2, we do not want duplicate cases in reverse order
# ignore overlapping words
if loc1 >= loc2 or loc2 - loc1 < 3 or loc2 - loc1 > A:
continue
word_pairs.add((word1, word2, loc1, loc2))
return word_pairs
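# e.g. _word_pairs_with_distance([('ACD', 0), ('EFG', 5)], A=40)
#      -> {('ACD', 'EFG', 0, 5)}   (non-overlapping, distance 5 <= A)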
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
# use index of sequences instead of sequences themselves
# each element is a set
self.words_in_sequences = {}
self.word_counts = []
# use this two get all word tuples from a sequence
self.sequence_words = []
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
word_list = set()
self.sequences.append(sequence)
self.word_counts.append(0)
sequence_index = len(self.sequences) - 1
sequence_length = len(sequence)
for i in range(sequence_length - 2):
word = sequence[i:i+3]
word_list.add((word, i))
if word not in self.words_in_sequences:
self.words_in_sequences[word] = set()
if sequence_index not in self.words_in_sequences[word]:
self.word_counts[sequence_index] += 1
self.words_in_sequences[word].add(sequence_index)
self.sequence_words.append((sequence_index, word_list))
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
if word not in self.words_in_sequences:
return []
return list(map(lambda i: self.sequences[i], self.words_in_sequences[word]))
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
sequences_count = len(self.sequences)
words_count = len(self.words_in_sequences)
return (
sequences_count,
words_count,
round(sum(self.word_counts) / sequences_count),
round(
reduce(lambda tot, _list: tot + len(_list), self.words_in_sequences.values(), 0) / words_count
)
)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub = substitution_matrix
def get_score(self, i, word, query=None, pssm=None):
score = 0
word_length = len(word)
for j in range(word_length):
if query:
score += self.sub[AA_TO_INT[query[i+j]]][AA_TO_INT[word[j]]]
else:
score += pssm[i+j][AA_TO_INT[word[j]]]
return score
def get_words_for_position(self, i, sequence=None, pssm=None, T=11):
words = set()
for cand_word in WORDS:
score = self.get_score(i, cand_word, sequence, pssm)
if score >= T:
words.add(cand_word)
return words
def get_words_with_position(self, *, sequence=None, pssm=None, T=11):
words_with_positions = set()
if sequence:
seq_len = len(sequence)
else:
seq_len = pssm.shape[0]
for i in range(seq_len - 2):
words_with_positions.update((word, i) for word in self.get_words_for_position(i, sequence, pssm, T))
return words_with_positions
def get_word_position_map(self, *, sequence=None, pssm=None, T=11):
word_position_map = {}
if sequence:
seq_len = len(sequence)
else:
seq_len = pssm.shape[0]
for i in range(seq_len - 2):
for word in self.get_words_for_position(i, sequence, pssm, T):
if word not in word_position_map:
word_position_map[word] = [i]
else:
word_position_map[word].append(i)
return word_position_map
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words = set()
if sequence:
seq_len = len(sequence)
else:
seq_len = pssm.shape[0]
for i in range(seq_len - 2):
words.update(self.get_words_for_position(i, sequence, pssm, T))
return words
def search_one_hit(self, blast_db: BlastDb, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
res = {}
words_with_positions = self.get_words_with_position(sequence=query, pssm=pssm, T=T)
if query:
len_query = len(query)
else:
len_query = pssm.shape[0]
for word, i in words_with_positions:
if word in blast_db.words_in_sequences:
for sequence_index in blast_db.words_in_sequences[word]:
sequence = blast_db.sequences[sequence_index]
len_seq = len(sequence)
for match in re.finditer(f"(?={word})", sequence):
j = match.start()
# i.e. no left move
best_l = 1
# compute initial score
k = 3
score = self.get_score(i, sequence[j:j+k], query, pssm)
best_k = 3
max_score = score
k = 4
while True:
if i+k > len_query or j+k > len_seq:
break
# compute incrementally
score += self.get_score(i+k-1, sequence[j+k-1:j+k], query, pssm)
if score <= max_score - X:
break
if score > max_score:
max_score = score
best_k = k
k += 1
# continue from best alignment
k = best_k
score = max_score
l = 2
while True:
if i-l+1 < 0 or j-l+1 < 0:
break
# compute incrementally
score += self.get_score(i-l+1, sequence[j-l+1:j-l+2], query, pssm)
if score <= max_score - X:
break
if score > max_score:
max_score = score
best_l = l
l += 1
l = best_l
if max_score >= S:
if sequence not in res:
res[sequence] = set()
res[sequence].add((i-l+1, j-l+1, l+k-1, max_score))
return res
def hsp_duplicate(self, found_hsp, new_len, l_index, r_index):
# print('found_hsp', found_hsp)
for hsp_start, hsp_len, hsp_dist in found_hsp:
hsp_end = hsp_start + hsp_len - 1
# print('HSP', l_index, new_len, 'of', hsp_start, hsp_len)
if (
(hsp_len == new_len or
hsp_dist == new_len) and (
(l_index - hsp_start >= -2 and l_index - hsp_end <= 2)
or (r_index - hsp_start >= -2 and r_index - hsp_end <= 2)
)
):
# print('DUP', l_index, new_len, 'of', hsp_start, hsp_len)
return True
return False
def hsp_duplicate_post(self, res_seq):
res_seq_sorted = sorted(res_seq, key=lambda _res: (_res[1], _res[2]))
res_seq_filtered = []
for _res in res_seq_sorted:
l_index = _res[1]
new_len = _res[2]
r_index = l_index + new_len - 1
is_dup = False
for prev_res in res_seq_filtered:
hsp_start = prev_res[1]
hsp_len = prev_res[2]
hsp_end = hsp_start + hsp_len - 1
if (
(l_index == hsp_start
) and (
(l_index - hsp_start >= -2 and l_index - hsp_end <= 0)
or (r_index - hsp_start >= -2 and r_index - hsp_end <= 0)
)
):
# print('DUP2', l_index, new_len, 'of', hsp_start, hsp_len)
is_dup = True
break
if not is_dup:
res_seq_filtered.append(_res)
return res_seq_filtered
def two_hit_alg(self, loc1, loc2, l_index, r_index, sequence, query, pssm, len_query, found_hsp, new_len, S, X):
# print(sequence[l_index:l_index+3], loc1, l_index, sequence[r_index:r_index+3], loc2, r_index, sequence, query)
len_seq = len(sequence)
i = loc2
j = r_index
# compute initial score
score = self.get_score(i, sequence[j:j+3], query, pssm)
# print(i, j, 3, query and query[i:i+3], sequence[j:j+3], f'({score})')
# init max_score
best_l = 1
max_score = score
l = 2
k = 3 # i.e. no right move
while True:
if i-l+1 < 0 or j-l+1 < 0:
break
# compute incrementally
diff = self.get_score(i-l+1, sequence[j-l+1:j-l+2], query, pssm)
score += diff
# print(i-l+1, j-l+1, l+k-1, query and query[i-l+1:i+k], sequence[j-l+1:j+k], f'({score})', diff)
if score <= max_score - X:
break
if score > max_score:
max_score = score
best_l = l
l += 1
# keep best_l
l = best_l
# i.e. no right move
best_k = 3
query_start = i-l+1
target_start = j-l+1
# continue from best alignment
score = max_score
# check if extension reached to H_L
if target_start - l_index > 2:
return
k = 4
while True:
if i+k > len_query or j+k > len_seq:
break
# compute incrementally
diff = self.get_score(i+k-1, sequence[j+k-1:j+k], query, pssm)
score += diff
# print(query_start, target_start, l+k-1, query and query[query_start:i+k], sequence[target_start:j+k], f'({score})', diff)
if score <= max_score - X:
break
if score > max_score:
max_score = score
best_k = k
k += 1
# keep best_k
k = best_k
length = l+k-1
score = max_score
found_hsp.add((target_start, length, new_len))
# print('HSP add', l_index, r_index, (target_start, length, new_len))
if score >= S:
return (query_start, target_start, length, score)
def search_two_hit(self, blast_db: BlastDb, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
res = {}
word_position_map = self.get_word_position_map(sequence=query, pssm=pssm, T=T)
words = word_position_map.keys()
len_query = len(query) if query else pssm.shape[0]
for sequence_index, sequence_words in blast_db.sequence_words:
res_seq = set()
sequence = blast_db.sequences[sequence_index]
found_hsp = set()
sequence_words = list(filter(lambda word_pos: word_pos[0] in words, sequence_words))
sequence_words = sorted(sequence_words, key=lambda seq_word: (word_position_map[seq_word[0]][0], word_position_map[seq_word[0]][0]))
sequence_word_tuples = _word_pairs_with_distance(sequence_words, A)
            # sort the word tuples by the positions of H_L and H_R in the query, in corresponding order
sequence_word_tuples = sorted(sequence_word_tuples, key=lambda word_tuple: (word_position_map[word_tuple[0]][0], word_position_map[word_tuple[1]][0]))
for word1, word2, l_index, r_index in sequence_word_tuples:
new_len = r_index - l_index + 1
                # check the locations of the words in the query (there can be multiple occurrences) and keep pairs whose query distance matches r_index - l_index
tuples = [(loc1, loc2) for loc1 in word_position_map[word1] for loc2 in word_position_map[word2] if loc2 - loc1 == new_len - 1]
for loc1, loc2 in tuples:
# check if (l_index, r_index) tuple is a candidate, both words should not be partially included in any HSP
if self.hsp_duplicate(found_hsp, new_len, l_index, r_index):
pass # continue
_res = self.two_hit_alg(loc1, loc2, l_index, r_index, sequence, query, pssm, len_query, found_hsp, new_len, S, X)
if _res is not None:
res_seq.add(_res)
res_seq = self.hsp_duplicate_post(res_seq)
if len(res_seq) > 0:
res[sequence] = res_seq
return res
<file_sep>##############
# Exercise 2.5
##############
# You can use the supplied test cases for your own testing. Good luck!
# Genetic code dictionary
codon_dict = {
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L', 'ATT': 'I', 'ATC': 'I',
'ATA': 'I', 'ATG': 'M', #START
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'CCT': 'P', 'CCC': 'P',
'CCA': 'P', 'CCG': 'P', 'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'TAT': 'Y', 'TAC': 'Y', 'TAA': '*', 'TAG': '*', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q', 'AAT': 'N', 'AAC': 'N',
'AAA': 'K', 'AAG': 'K', 'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'TGT': 'C', 'TGC': 'C', 'TGA': '*', 'TGG': 'W',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R', 'GGT': 'G', 'GGC': 'G',
'GGA': 'G', 'GGG': 'G'
}
# Exercise code
def codons_to_aa(orf):
    if len(orf) % 3 != 0:
return None
# Iterate with step 3 (per triplet)
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
# Join values to create a new string with AAs
aa_seq = ''.join(codon_dict[c] for c in codons)
# print(aa_seq)
return aa_seq
# Find complementary strand (function from exercise 0)
def complementary(basesString):
complementaryString = ''
for base in basesString:
if base == 'A':
complementaryString = complementaryString + 'T'
elif base == 'G':
complementaryString = complementaryString + 'C'
elif base == 'T':
complementaryString = complementaryString + 'A'
elif base == 'C':
complementaryString = complementaryString + 'G'
else:
print("Unknown input")
return complementaryString
def get_orfs(genome_par):
genome = genome_par.upper()
if any(n not in ('G', 'T', 'A', 'C') for n in genome):
raise TypeError("Not a valid DNA sequence")
genome_piece = ""
    starting_point = 0
    ending_point = 0
    count = 0
orfs = []
# Start reading the genome from pos 0, 1 and 2
for i in range(0, 3):
protein = ""
# Iterate from the first base, till the end of the genome string, with step 3
for j in range(i, len(genome) - 2, 3):
# Initiate current codon to empty string
codon = ""
# Current codon string contains current base and the next 2 bases
codon = genome[j] + genome[j + 1] + genome[j + 2]
# Turn to aminoacids
aa = codons_to_aa(codon)
            # If it is one of the stop codons (implemented as a star here)
            if aa == "*":
                count = 0
                # j is the first index of the stop triplet, so j + 2 is its last nucleotide
                ending_point = j + 2
                if len(protein) > 33:
                    # Add to orfs list (primary strand, so the reverse flag is False)
                    orfs.append((starting_point, ending_point, protein, False))
                protein = ""
            if count == 1:
                # Add aa to the protein string
                protein = protein + aa
            # If the current aa is the one mapping to the start codon
            if aa == "M" and count == 0:
                # starting point is the index of the first base of the ATG
                starting_point = j
                count = 1
                protein = aa
# Reverse and get the complementary strand
genome = genome[::-1]
genome = complementary(genome)
# Same calculation as before
    # TODO: merge the primary and the complementary strand cases into one helper
# Start reading the genome from pos 0, 1 and 2
for i in range(0, 3):
protein = ""
# Iterate from the first base, till the end of the genome string, with step 3
for j in range(i, len(genome) - 2, 3):
# Initiate current codon to empty string
codon = ""
# Current codon string contains current base and the next 2 bases
codon = genome[j] + genome[j + 1] + genome[j + 2]
# Turn to aminoacids
aa = codons_to_aa(codon)
            # If it is one of the stop codons (implemented as a star here)
            if aa == "*":
                count = 0
                # j is the first index of the stop triplet, so j + 2 is its last nucleotide
                ending_point = j + 2
                if len(protein) > 33:
                    # We are on the reversed complementary strand, so map both
                    # positions back to indices on the primary strand
                    mapped_start = len(genome) - starting_point - 1
                    mapped_end = len(genome) - ending_point - 1
                    # Add to orfs list (reverse flag is True)
                    orfs.append((mapped_start, mapped_end, protein, True))
                protein = ""
            if count == 1:
                # Add aa to the protein string
                protein = protein + aa
            # If the current aa is the one mapping to the start codon
            if aa == "M" and count == 0:
                # starting point is the index of the first base of the ATG
                starting_point = j
                count = 1
                protein = aa
return orfs
# print(codons_to_aa("ATGGGTAGTAGGATGATAGTA"))
# print(get_orfs('TTAATGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGTAACAT'))<file_sep>##############
# Exercise 2.5
##############
import os
codon_dict = {
'GCA' : 'A',
'GCC' : 'A',
'GCT' : 'A',
'GCG' : 'A',
'TGC' : 'C',
'TGT' : 'C',
'GAC' : 'D',
'GAT' : 'D',
'GAA' : 'E',
'GAG' : 'E',
'TTC' : 'F',
'TTT' : 'F',
'GGA' : 'G',
'GGC' : 'G',
'GGG' : 'G',
'GGT' : 'G',
'CAC' : 'H',
'CAT' : 'H',
'ATA' : 'I',
'ATC' : 'I',
'ATT' : 'I',
'AAA' : 'K',
'AAG' : 'K',
'CTA' : 'L',
'CTC' : 'L',
'CTG' : 'L',
'CTT' : 'L',
'TTA' : 'L',
'TTG' : 'L',
'ATG' : 'M',
'AAC' : 'N',
'AAT' : 'N',
'CCA' : 'P',
'CCC' : 'P',
'CCG' : 'P',
'CCT' : 'P',
'CAA' : 'Q',
'CAG' : 'Q',
'AGA' : 'R',
'AGG' : 'R',
'CGA' : 'R',
'CGC' : 'R',
'CGG' : 'R',
'CGT' : 'R',
'AGC' : 'S',
'AGT' : 'S',
'TCA' : 'S',
'TCC' : 'S',
'TCG' : 'S',
'TCT' : 'S',
'ACA' : 'T',
'ACC' : 'T',
'ACG' : 'T',
'ACT' : 'T',
'GTA' : 'V',
'GTC' : 'V',
'GTG' : 'V',
'GTT' : 'V',
'TGG' : 'W',
'TAA' : 'X',
'TAG' : 'X',
'TGA' : 'X',
'TAC' : 'Y',
'TAT' : 'Y'
}
def get_orfs(genome):
    for base in genome:
        if base not in 'ACGT':
            raise TypeError
reversed_genome = "".join(reverse_genome(genome))
reversed_genome_read = reversed_genome[::-1]
orfs = find_orfs(genome, False)
reversed_orfs = find_orfs(reversed_genome_read, True)
# fixing the indices of the reversed one
for i in reversed_orfs:
i[0] = len(reversed_genome_read) - i[0] - 1
i[1] = len(reversed_genome_read) - i[1] - 1
tuples = [tuple(l) for l in orfs + reversed_orfs]
return tuples
def reverse_genome(genome):
tempGenome = list(genome)
for i in range(len(tempGenome)):
if tempGenome[i] == 'A':
tempGenome[i] = 'T'
elif tempGenome[i] == 'C':
tempGenome[i] = 'G'
elif tempGenome[i] == 'G':
tempGenome[i] = 'C'
elif tempGenome[i] == 'T':
tempGenome[i] = 'A'
return tempGenome
def find_all(genome, triplet):
loc = list()
for i in range(0, len(genome)-2):
if genome[i] == triplet[0] and genome[i+1] == triplet[1] and genome[i+2] == triplet[2]:
loc.append(i)
return loc
def get_translated_sequence(genome):
protein = []
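    # translate complete triplets only; the final partial lookup yields None
    # and is dropped by the [:-1] slice below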
for i in range(0, len(genome) + 1, 3):
protein.append(codon_dict.get(genome[i:i + 3]))
return ''.join(protein[:-1])
def find_orfs(genome, is_reverse_complementary):
start_list = find_all(genome, 'ATG')
start_list.sort()
stop_list = list()
for stop in ['TAA', 'TGA', 'TAG']:
stop_list += find_all(genome, stop)
stop_list.sort()
orfs = []
for i in start_list:
for j in stop_list:
if (j - i) % 3 == 0:
                if j - i > 99:  # more than 99 nt between start and stop, i.e. at least 34 amino acids (> 33)
orfs.append([i, j + 2, get_translated_sequence(genome[i:j]), is_reverse_complementary])
break
orfs = remove_ones_includes_ending(orfs)
orfs = remove_overlapping_orfs(orfs)
return orfs
def remove_ones_includes_ending(orfs):
temp = []
for codon in orfs:
if codon[2].find('X') == -1:
temp.append(codon)
return temp
def remove_overlapping_orfs(orfs):
    # ORFs sharing the same stop index overlap; keep only the longest one (smallest start)
    for o in orfs:
indices = [o]
for j in orfs:
if o[1] == j[1] and o != j:
indices.append(j)
if len(indices) > 1:
indices.sort(key=lambda o: o[0])
indices = indices[1:]
for o in indices:
orfs.remove(o)
return orfs
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
from orffinder import aa_dist
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.sequence_names, self.sequences = self.read_fasta(filepath)
def get_counts(self):
return len(self.sequences)
    def get_average_length(self):
        total = 0
        for x in self.sequences:
            total += len(x)
        return total / self.get_counts()
def read_fasta(self, path):
sequence_names = []
sequences = []
tmp_sequence = ''
with open(path) as f:
for line in f:
line = line.strip()
if not line or line.startswith(';'): # Ignore comments
if tmp_sequence:
sequences.append(tmp_sequence)
tmp_sequence = ''
continue
if line.startswith(">"):
active_sequence_name = line
sequence_names.append(active_sequence_name)
tmp_sequence = ''
else:
tmp_sequence = ''.join([tmp_sequence, line])
if tmp_sequence:
sequences.append(tmp_sequence)
# Remove trailing Stop Codon
for index, sequence in enumerate(sequences):
if sequence.endswith('*'):
sequence = sequence[:-1]
sequences[index] = sequence
return sequence_names, sequences
def get_abs_frequencies(self):
counted = Counter(''.join(self.sequences))
return counted
def get_av_frequencies(self):
# return number of occurrences normalized by length
return aa_dist(''.join(self.sequences))
<file_sep>import numpy as np
from itertools import chain
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1 # columns
self.string2 = string2 # rows
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
self.alignments = list(self._backtrack("", "", 0, len(string2), len(string1)))
def _score(self, row, col):
m = self.score_matrix
c1 = self.string1[col-1]
c2 = self.string2[row-1]
v1 = m[row-1][col-1] + self.substitution_matrix[c1][c2]
v2 = m[row-1][col] + self.gap_penalty # vertical move; gap in col string -> string1
v3 = m[row][col-1] + self.gap_penalty # horizontal move; gap in row string -> string2
return (v1, v2, v3)
def _backtrack(self, string1, string2, total_score, row, col):
        if row < 1 or col < 1:  # reached the matrix border; leading gaps beyond it are not traced further
print("end: ({},{})".format(row, col))
return [(string1, string2, total_score)]
value = self.score_matrix[row][col]
char1 = self.string1[col-1]
char2 = self.string2[row-1]
scores = self._score(row, col)
result = []
if scores[0] == value and char1 == char2:
print("v1: {}=={} ({},{}) {}".format(char1, char2, row, col, scores))
result.append(
self._backtrack(char1+string1, char1+string2, total_score+value, row-1, col-1))
if scores[0] == value and char1 != char2:
print("v1': {}!={} ({},{}) {}".format(char1, char2, row, col, scores))
result.append(
self._backtrack(char1+string1, char2+string2, total_score+value, row-1, col-1))
if scores[1] == value:
print("v2: {} {} ({},{}) {}".format(char1, char2, row, col, scores))
result.append(
self._backtrack('-'+string1, char2+string2, total_score+value, row-1, col))
if scores[2] == value:
print("v2: {} {} ({},{}) {}".format(char1, char2, row, col, scores))
result.append(
self._backtrack(char1+string1, '-'+string2, total_score+value, row, col-1))
if len(result) == 0:
print("no recursion ({},{}))".format(row, col))
return chain.from_iterable(result)
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
m = self.score_matrix
for (row,col), _ in np.ndenumerate(m):
if row == 0:
m[0][col] = col * self.gap_penalty
elif col == 0:
m[row][0] = row * self.gap_penalty
else:
m[row][col] = max(self._score(row, col))
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
# return max(alignment[1] for alignment in self.alignments)
return self.score_matrix[len(self.string2)][len(self.string1)]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.alignments)
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
# m = self.score_matrix
# start = (m.shape[0]-1, m.shape[1]-1)
return [(a[0], a[1]) for a in self.alignments]
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
<file_sep>##############
# Exercise 2.5
# Actually 1.4
##############
# You can use the supplied test cases for your own testing. Good luck!
dna_to_aa = {
"TTT": "F", "TTC": "F",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"TAT": "Y", "TAC": "Y",
"TAA": "", # STOP
"TAG": "", # STOP
"TGT": "C", "TGC": "C",
"TGA": "", # STOP
"TGG": "W",
"TTA": "L", "TTG": "L", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAT": "H", "CAC": "H",
"CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R",
"ATT": "I", "ATC": "I", "ATA": "I",
"ATG": "M",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"AAT": "N", "AAC": "N",
"AAA": "K", "AAG": "K",
"AGA": "R", "AGG": "R",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"GAT": "D", "GAC": "D",
"GAA": "E", "GAG": "E",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G"
}
base_complement = { "A": "T", "T": "A", "G": "C", "C": "G" }
start_seq = "ATG"
stop_seqs = ["TAA", "TAG", "TGA"]
def codons_to_aa(dna):
triplets = [dna[i:i+3] for i in range(0,len(dna),3) if i+3 <= len(dna)]
return "".join([dna_to_aa[t] for t in triplets])
def complement_frame(frame):
    try:
        return "".join([base_complement[b] for b in reversed(frame)])
    except KeyError:
        raise TypeError("Unknown base")
def find_start_codons(genome, offset):
return [idx for idx in range(offset, len(genome), 3)
if idx+3 <= len(genome) and genome[idx:idx+3] == start_seq]
def find_stop_codons(genome, offset):
return [idx for idx in range(offset, len(genome), 3)
if idx+3 <= len(genome) and genome[idx:idx+3] in stop_seqs]
def validate_dna(genome):
    if set(genome.upper()) - set("ATGC"):
        raise TypeError("Invalid bases")
def find_orfs_in_frame(genome, offset, rev):
    genome_double = genome + genome  # doubling the genome lets ORFs wrap around the circular sequence
idx = offset
orfs = []
orf_start = None
orf_codons = None
while True:
triplet = genome_double[idx:idx+3]
if orf_codons is None and triplet == start_seq:
orf_start = idx
orf_codons = triplet
elif orf_codons is not None and triplet in stop_seqs:
amino_acids = codons_to_aa(orf_codons)
orf_stop = (idx + 2) % len(genome)
if len(amino_acids) > 33:
orfs.append((orf_start, orf_stop, amino_acids, rev))
orf_start = None
orf_codons = None
elif orf_codons is not None and triplet not in stop_seqs:
orf_codons += triplet
if idx >= (len(genome)-1) and orf_codons is None:
break
idx += 3
return orfs
def format_orfs(orfs, genome_len):
result = []
for orf in orfs:
if not orf[3]:
start = orf[0]
end = orf[1]
        else:
            # map positions on the reverse-complementary strand back to the primary strand
            start = abs(orf[0] - genome_len) - 1
            end = abs(orf[1] - genome_len) - 1
        result.append((start, end, orf[2], orf[3]))
return result
def filter_orfs(orfs):
stop_codon_longest = {}
for idx, orf in enumerate(orfs):
stop = orf[1]
aa_len = len(orf[2])
if not stop in stop_codon_longest or aa_len > stop_codon_longest[stop][0]:
stop_codon_longest[stop] = (aa_len, idx)
return [orfs[longest_orf[1]] for longest_orf in stop_codon_longest.values()]
def get_orfs(genome):
validate_dna(genome)
orfs = []
for rev in [False, True]:
for offset in [0, 1, 2]:
if not rev:
genome_prime = genome
else:
genome_prime = complement_frame(genome)
orfs += find_orfs_in_frame(genome_prime, offset, rev)
return format_orfs(filter_orfs(orfs), len(genome))
if __name__ == "__main__":
def read_genome(file):
lines = open(file, "r")
genome = ""
for line in lines:
genome += line.strip()
lines.close()
return genome
genome_file = read_genome("tests/genome.txt")
orfs = get_orfs(genome_file)
for orf in orfs:
print(orf)
<file_sep>import numpy as np
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix or array provided as
parameters. Further, use those indices when generating or returning
any matrices or arrays. Failure to do so will most likely result in
not passing the tests.
EXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'
in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
GAP_INDEX = AA_TO_INT['-']
class MSA:
def __init__(self, sequences):
"""
Initialize the MSA class with the provided list of sequences. Check the
        sequences for correctness. Pre-calculate any statistics you see fit.
:param sequences: List containing the MSA sequences.
"""
self.sequences=sequences
self.length=len(self.sequences)
# check if more than one seq:
if self.length<2:
raise TypeError('This is not a valid MSA, at least two sequences are needed')
# check if all sequences have the same length
self.len_seq=len(self.sequences[0])
for s in self.sequences:
if len(s)!=self.len_seq:
                raise TypeError('All sequences should have the same length')
# check if all sequences have valid amino acids and gap characters
for s in self.sequences:
for c in s:
if c not in ALPHABET:
raise TypeError('Invalid character')
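    # Minimal sketch of a valid input (made-up sequences; same length,
    # alphabet characters only):
    #
    #   msa = MSA(["SEQWENCE-", "SEQ-ENCEY"])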
def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False, redistribute_gaps=False, add_pseudocounts=False):
"""
Return a PSSM for the underlying MSA. Use the appropriate refinements
according to the parameters. If no bg_matrix is specified, use uniform
background frequencies.
Every row in the resulting PSSM corresponds to a non-gap position in
the primary sequence of the MSA (i.e. the first one).
Every column in the PSSM corresponds to one of the 20 amino acids.
Values that would be -inf must be replaced by -20 in the final PSSM.
Before casting to dtype=numpy.int64, round all values to the nearest
integer (do not just FLOOR all values).
:param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).
Access the matrix using the indices from AA_TO_INT.
:param beta: Beta value (float) used to weight the pseudocounts
against the observed amino acids in the MSA.
:param use_sequence_weights: Calculate and apply sequence weights.
:param redistribute_gaps: Redistribute the gaps according to the
background frequencies.
:param add_pseudocounts: Calculate and add pseudocounts according
to the background frequencies.
:return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).
L = ungapped length of the primary sequence.
"""
L=len(self.get_primary_sequence())
# pssm = np.zeros((L, 20))
### Basic PSSM ###
if use_sequence_weights==False and redistribute_gaps==False and add_pseudocounts==False:
# do not take gaps into account, just return scores
# count_aa matrix
count_aa=np.zeros((self.len_seq, 20)) # just the aa, not gaps
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
for aa in set(AA_i):
if aa!='-': # do not take gaps into account
abs_freq_aa=AA_i.count(aa)
j=AA_TO_INT[aa]
count_aa[i][j]=abs_freq_aa
            # Normalize: divide by row sum for relative frequency
            for i in range(count_aa.shape[0]):
                count_aa[i]=count_aa[i]/np.sum(count_aa[i])
# take background probability into account
            if bg_matrix is None:
count_aa*=20 # divide by 0.05
else:
bg_proba=np.sum(bg_matrix, axis=1) # sum along rows
for j in range(20):
                    count_aa[:,j]/=bg_proba[j]
# compute score with log2:
# fix -inf issue
for i in range(count_aa.shape[0]):
for j in range(count_aa.shape[1]):
if count_aa[i][j]==0:
count_aa[i][j]=-20
else:
count_aa[i][j]=2*np.log2(count_aa[i][j])
# Filter gaps: remove the rows corresponding to gaps in the first sequence
basic_pssm=np.zeros((L,20))
c=0
for i in range(self.len_seq):
if self.sequences[0][i]!='-':
basic_pssm[c]=count_aa[i]
c+=1
return np.rint(basic_pssm).astype(np.int64)
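        # Worked micro-example for the score above: with uniform background
        # p = 0.05, a column where an amino acid has relative frequency 0.5
        # scores 2*log2(0.5/0.05) = 2*log2(10) ~ 6.64, rounded to 7 in the
        # final int64 PSSM. (Illustrative numbers, not from the test data.)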
### Gap redistribution only ###
if redistribute_gaps==True:
if use_sequence_weights==False and add_pseudocounts==False:
                return self.gap_redistribution(bg_matrix)
### Sequence weights only ###
if use_sequence_weights==True:
if redistribute_gaps==False and add_pseudocounts==False:
                return self.weighted_basic(bg_matrix)
### Redistributed gaps and sequence weights ###
if redistribute_gaps==True:
if use_sequence_weights==True and add_pseudocounts==False:
                return self.redistributed_weights(bg_matrix)
### Pseudocounts only ###
        if add_pseudocounts==True:
            if redistribute_gaps==False and use_sequence_weights==False:
                return self.basic_pseudo(bg_matrix, beta)
### Pseudocounts with gap redistribution ###
        if add_pseudocounts==True:
            if redistribute_gaps==True and use_sequence_weights==False:
                # Same computation as redistributed_pseudo(); delegate rather
                # than duplicating it inline.
                return self.redistributed_pseudo(bg_matrix, beta)
def gap_redistribution(self, bg_matrix=None):
# Count aa and gaps
count_aa=np.zeros((self.len_seq, 21)) # gaps taken into account!
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
for aa in set(AA_i):
# if aa!='-': # take gaps into account
abs_freq_aa=AA_i.count(aa)
j=AA_TO_INT[aa]
count_aa[i][j]=abs_freq_aa
# Multiply gaps by aa background frequencies:
# every gap adds p_aa_j to aa_j count
# take bg_matrix option into account!
for i in range(count_aa.shape[0]):
if count_aa[i][20]>0:
                if bg_matrix is None:
for j in range(20):
count_aa[i][j]+=count_aa[i][20]*0.05
else:
bg_proba=np.sum(bg_matrix, axis=1)
for j in range(20):
count_aa[i][j]+=count_aa[i][20]*bg_proba[j]
        # Drop last column (gaps column)
        count_aa=np.delete(count_aa, -1, 1)
# Normalize by row sums
for i in range(count_aa.shape[0]):
count_aa[i]=count_aa[i]/np.sum(count_aa[i])
        # Divide by background frequencies
        if bg_matrix is None:
            count_aa*=20 # divide by 0.05
        else:
            bg_proba=np.sum(bg_matrix, axis=1) # sum along rows
            for j in range(20):
                count_aa[:,j]/=bg_proba[j]
# Compute log2 score
# fix -inf issue
for i in range(count_aa.shape[0]):
for j in range(count_aa.shape[1]):
if count_aa[i][j]==0:
count_aa[i][j]=-20
else:
count_aa[i][j]=2*np.log2(count_aa[i][j])
# Filter gaps in primary
redistributed_pssm=np.zeros((len(self.get_primary_sequence()),20))
c=0
for i in range(self.len_seq):
if self.sequences[0][i]!='-':
redistributed_pssm[c]=count_aa[i]
c+=1
return np.rint(redistributed_pssm).astype(np.int64)
def weighted_count(self):
"""
gives the weighted count matrix corresponding to MSA
"""
weights=self.get_sequence_weights()
weighted_aa=np.zeros((self.len_seq, 21))
for k in range(self.length):
for i in range(self.len_seq):
for j in range(21):
if self.sequences[k][i]==INT_TO_AA[j]:
weighted_aa[i][j]+=weights[k]
return weighted_aa.astype(np.float64)
def weighted_basic(self, bg_matrix=None):
# Weighted count_aa matrix
count_aa=self.weighted_count()
        # Drop last column (gaps column)
        count_aa=np.delete(count_aa, -1, 1)
# Normalize: divide by row sum for relative frequency
for i in range(count_aa.shape[0]):
count_aa[i]=count_aa[i]/np.sum(count_aa[i])
        # take background probability into account
        if bg_matrix is None:
            count_aa*=20 # divide by 0.05
        else:
            bg_proba=np.sum(bg_matrix, axis=1) # sum along rows
            for j in range(20):
                count_aa[:,j]/=bg_proba[j]
# compute score with log2:
# fix -inf issue
for i in range(count_aa.shape[0]):
for j in range(count_aa.shape[1]):
if count_aa[i][j]==0:
count_aa[i][j]=-20
else:
count_aa[i][j]=2*np.log2(count_aa[i][j])
# Filter gaps: remove the rows corresponding to gaps in the first sequence
basic_weighted_pssm=np.zeros((len(self.get_primary_sequence()),20))
c=0
for i in range(self.len_seq):
if self.sequences[0][i]!='-':
basic_weighted_pssm[c]=count_aa[i]
c+=1
return np.rint(basic_weighted_pssm).astype(np.int64)
def redistributed_weights(self, bg_matrix=None):
"""
case redistributed_gaps = True and weighted_sequences = True
"""
# Count aa and gaps
count_aa=self.weighted_count()
# Multiply gaps by aa background frequencies:
# every gap adds p_aa_j to aa_j count
# take bg_matrix option into account!
for i in range(count_aa.shape[0]):
if count_aa[i][20]>0:
                if bg_matrix is None:
for j in range(20):
count_aa[i][j]+=count_aa[i][20]*0.05
else:
bg_proba=np.sum(bg_matrix, axis=1)
for j in range(20):
count_aa[i][j]+=count_aa[i][20]*bg_proba[j]
        # Drop last column (gaps column)
        count_aa=np.delete(count_aa, -1, 1)
# Normalize by row sums
for i in range(count_aa.shape[0]):
count_aa[i]=count_aa[i]/np.sum(count_aa[i])
        # Divide by background frequencies
        if bg_matrix is None:
            count_aa*=20 # divide by 0.05
        else:
            bg_proba=np.sum(bg_matrix, axis=1) # sum along rows
            for j in range(20):
                count_aa[:,j]/=bg_proba[j]
# Compute log2 score
# fix -inf issue
for i in range(count_aa.shape[0]):
for j in range(count_aa.shape[1]):
if count_aa[i][j]==0:
count_aa[i][j]=-20
else:
count_aa[i][j]=2*np.log2(count_aa[i][j])
# Filter gaps in primary
redistributed_pssm=np.zeros((len(self.get_primary_sequence()),20))
c=0
for i in range(self.len_seq):
if self.sequences[0][i]!='-':
redistributed_pssm[c]=count_aa[i]
c+=1
return np.rint(redistributed_pssm).astype(np.int64)
def get_size(self):
"""
Return the number of sequences in the MSA and the MSA length, i.e.
the number of columns in the MSA. This includes gaps.
:return: Tuple of two integers. First element is the number of
sequences in the MSA, second element is the MSA length.
"""
# number of sequences in the msa: self.length
        # Length of the MSA = number of columns = number of characters in a single sequence: self.len_seq
return (self.length, self.len_seq)
def get_primary_sequence(self):
"""
Return the primary sequence of the MSA. In this exercise, the primary
sequence is always the first sequence of the MSA. The returned
sequence must NOT include gap characters.
:return: String containing the ungapped primary sequence.
"""
self.primary=''
for c in self.sequences[0]:
if c != '-':
self.primary+=c
return self.primary
def get_sequence_weights(self):
"""
Return the calculated sequence weights for all sequences in the MSA.
The order of weights in the array must be equal to the order of the
sequences in the MSA.
:return: Numpy array (dtype=numpy.float64) containing the weights for
all sequences in the MSA.
"""
# Initialize weight matrix
        weights = np.zeros(self.length)  # as many weights as sequences
# Compute the weights
for k in range(self.length):
w_k=0 # initialize weight for sequence k
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
seq_k=self.sequences[k]
seq_ik=seq_k[i]
r_i = len(set(AA_i)) # number of distinct aa in column i of MSA
s_ik=AA_i.count(seq_ik) # number of occurences of seq_ik at column i of MSA
if r_i !=1:
w_k+=1/(s_ik*r_i)
weights[k]=w_k
return weights.astype(np.float64)
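    # Worked micro-example for the weighting scheme above (made-up column):
    # suppose column i of the MSA holds the residues [A, A, C]. Then r_i = 2
    # distinct residues, s_ik = 2 for the two sequences showing A and 1 for
    # the sequence showing C, so the column contributes 1/(2*2) = 0.25 to the
    # weights of the first two sequences and 1/(1*2) = 0.5 to the third.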
def get_number_of_observations(self):
"""
Return the estimated number of independent observations in the MSA.
:return: Estimate of independent observation (dtype=numpy.float64).
"""
num_obs = 0.0 # estimated number of independent observations
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
r_i = len(set(AA_i)) # number of distinct aa in column i of MSA
num_obs+=r_i
num_obs/=self.len_seq
        return num_obs
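    # Micro-example: for a 2-column MSA with columns [A, A, C] and [G, G, G]
    # the distinct-residue counts are r = [2, 1], so the estimated number of
    # independent observations is (2 + 1) / 2 = 1.5. (Illustrative only.)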
def get_pseudocounts(self, bg_matrix=None):
"""
No redistributed gaps!
"""
pseudo=np.zeros((self.len_seq, 20))
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
            # Gap characters are ignored here (no gap redistribution).
            for j in range(20):
                g_ij=0 # init
                if bg_matrix is None:
                    for k in range(20): # gap not taken into account
                        f_ik=AA_i.count(INT_TO_AA[k])
                        p=1/20
                        q=1/400
                        g_ij+=f_ik*q/p
                else:
                    for k in range(20):
                        f_ik=AA_i.count(INT_TO_AA[k])
                        p_k=np.sum(bg_matrix, axis=1)[k]  # background freq. of aa k
                        q_jk=bg_matrix[j][k]
                        g_ij+=f_ik*q_jk/p_k
pseudo[i][j]=g_ij
return pseudo
def basic_pseudo(self, bg_matrix=None, beta=10):
L=len(self.get_primary_sequence())
# pssm = np.zeros((L, 20))
### Basic PSSM with pseudocounts ###
# do not take gaps into account, just return scores
# count_aa matrix
count_aa=np.zeros((self.len_seq, 20)) # just the aa, not gaps
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
for aa in set(AA_i):
if aa!='-': # do not take gaps into account
abs_freq_aa=AA_i.count(aa)
j=AA_TO_INT[aa]
count_aa[i][j]=abs_freq_aa
# Get pseudocounts
        pseudo = self.get_pseudocounts(bg_matrix)
# Get number of independent observations for adjusted frequencies
N=self.get_number_of_observations()
# Compute adjusted frequencies with weighted pseudocounts
for i in range(self.len_seq):
F_i=((N-1)*count_aa[i]+beta*pseudo[i])/(N-1+beta)
count_aa[i]=F_i
# Normalize: divide by row sum for relative frequency
for i in range(count_aa.shape[0]):
count_aa[i]=count_aa[i]/np.sum(count_aa[i])
        # take background probability into account
        if bg_matrix is None:
            count_aa*=20 # divide by 0.05
        else:
            bg_proba=np.sum(bg_matrix, axis=1) # sum along rows
            for j in range(20):
                count_aa[:,j]/=bg_proba[j]
# compute score with log2:
# fix -inf issue
for i in range(count_aa.shape[0]):
for j in range(count_aa.shape[1]):
if count_aa[i][j]==0:
count_aa[i][j]=-20
else:
count_aa[i][j]=2*np.log2(count_aa[i][j])
# Filter gaps: remove the rows corresponding to gaps in the first sequence
basic_pssm=np.zeros((L,20))
c=0
for i in range(self.len_seq):
if self.sequences[0][i]!='-':
basic_pssm[c]=count_aa[i]
c+=1
return np.rint(basic_pssm).astype(np.int64)
def redistributed_pseudo(self, bg_matrix=None, beta=10):
        ### Gap redistribution ###
        # Count aa and gaps
count_aa=np.zeros((self.len_seq, 21)) # gaps taken into account!
for i in range(self.len_seq):
AA_i=[seq[i] for seq in self.sequences]
for aa in set(AA_i):
# if aa!='-': # take gaps into account
abs_freq_aa=AA_i.count(aa)
j=AA_TO_INT[aa]
count_aa[i][j]=abs_freq_aa
# Multiply gaps by aa background frequencies:
# every gap adds p_aa_j to aa_j count
# take bg_matrix option into account!
for i in range(count_aa.shape[0]):
if count_aa[i][20]>0:
                if bg_matrix is None:
                    for j in range(20):
                        count_aa[i][j]+=count_aa[i][20]*0.05
                else:
                    bg_proba=np.sum(bg_matrix, axis=1)
                    for j in range(20):
                        count_aa[i][j]+=count_aa[i][20]*bg_proba[j]
# Drop last column (gaps column)
count_aa=np.delete(count_aa, -1, 1)
### Compute pseudocounts with redistributed gaps ###
pseudo=np.zeros((self.len_seq, 20))
for i in range(self.len_seq):
for j in range(20):
g_ij=0 # init
                if bg_matrix is None:
                    for k in range(20): # gap not taken into account
                        f_ik=count_aa[i][k]
                        p=1/20
                        q=1/400
                        g_ij+=f_ik*q/p
                else:
                    for k in range(20):
                        f_ik=count_aa[i][k]
                        p_k=np.sum(bg_matrix, axis=1)[k]  # background freq. of aa k
                        q_jk=bg_matrix[j][k]
                        g_ij+=f_ik*q_jk/p_k
pseudo[i][j]=g_ij
# Get number of independent observations for adjusted frequencies
N=self.get_number_of_observations()
# Compute adjusted frequencies with weighted pseudocounts
for i in range(self.len_seq):
F_i=((N-1)*count_aa[i]+beta*pseudo[i])/(N-1+beta)
count_aa[i]=F_i
# Normalize: divide by row sum for relative frequency
for i in range(count_aa.shape[0]):
count_aa[i]=count_aa[i]/np.sum(count_aa[i])
        # take background probability into account
        if bg_matrix is None:
            count_aa*=20 # divide by 0.05
        else:
            bg_proba=np.sum(bg_matrix, axis=1) # sum along rows
            for j in range(20):
                count_aa[:,j]/=bg_proba[j]
# compute score with log2:
# fix -inf issue
for i in range(count_aa.shape[0]):
for j in range(count_aa.shape[1]):
if count_aa[i][j]==0:
count_aa[i][j]=-20
else:
count_aa[i][j]=2*np.log2(count_aa[i][j])
# Filter gaps: remove the rows corresponding to gaps in the first sequence
pssm=np.zeros((len(self.get_primary_sequence()),20))
c=0
for i in range(self.len_seq):
if self.sequences[0][i]!='-':
pssm[c]=count_aa[i]
c+=1
return np.rint(pssm).astype(np.int64)
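if __name__ == '__main__':
    # Minimal usage sketch (not part of the assignment; sequences are made
    # up): build a tiny MSA and print its size, primary sequence and PSSM.
    msa = MSA(["SEQWENCE-", "SEQ-ENCEY"])
    print(msa.get_size())
    print(msa.get_primary_sequence())
    print(msa.get_pssm())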
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return aa in "KRH"
def isNegativelyCharged(aa):
return aa in "DE"
def isHydrophobic(aa):
#return aa in "VILFWYM"
#return aa in "GAVLIPFMT"
#return aa in "AILMFPWV"
return aa in "ACILMFVPGW"
#return aa in "ACILMFV"
def isAromatic(aa):
return aa in "FWYH"
def isPolar(aa):
#return aa in "NQSTY"
#return aa in "RNDCQEHKSTY"
return aa in "RNDQEHKSTY"
def isProline(aa):
return aa == "P"
def containsSulfur(aa):
return aa in "CM"
def isAcid(aa):
return aa in "DE"
def isBasic(aa):
return aa in "RKH"<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.has_align = False
self.alignments = [("", "")]
self.traceback_matrix = []
self.align()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
num_rows, num_cols = self.score_matrix.shape
# first_row = np.arange(start=0, stop=num_cols * self.gap_penalty, step=self.gap_penalty)
# first_col = np.arange(start=0, stop=num_rows * self.gap_penalty, step=self.gap_penalty)
# self.score_matrix[0, :] = first_row
# self.score_matrix[:, 0] = first_col
traceback = []
for index_row in range(1, num_rows): # first row == 0
traceback_row = [[False, False, True]]
for index_col in range(1, num_cols): # first col == 0
dia_score = self.score_matrix[index_row - 1, index_col - 1]
letter_1 = self.string1[index_col - 1]
letter_2 = self.string2[index_row - 1]
match_score = dia_score + self.substitution_matrix[letter_1][letter_2]
hor_gap_score = self.score_matrix[index_row, index_col - 1] + self.gap_penalty
vert_gap_score = self.score_matrix[index_row - 1, index_col] + self.gap_penalty
scores = [match_score, hor_gap_score, vert_gap_score, 0]
max_score = max(scores)
traceback_entry = [score == max_score for score in scores]
traceback_row.append(traceback_entry)
self.score_matrix[index_row, index_col] = max_score
traceback.append(traceback_row)
traceback = [[[False, True, False]] * num_cols] + traceback # fix first
self.traceback_matrix = traceback
best_score = self.score_matrix.max()
if best_score > 0:
self.has_align = True
best_score_index = np.where(self.score_matrix == best_score)
best_score_row = best_score_index[0][0]
best_score_col = best_score_index[1][0]
initial_where = (False, False, False)
initial_res = ["", "", [], []]
alignments = self.find_traces(best_score_row, best_score_col, initial_res, initial_where)
self.alignments = alignments
# self.alignments
def find_traces(self, index_row, index_col, res, where_from):
# end when top left
if self.score_matrix[index_row, index_col] == 0:
res[0] = (res[0] + self.string1[index_col])[::-1]
res[1] = (res[1] + self.string2[index_row])[::-1]
res[2].append(index_col)
res[3].append(index_row)
return [tuple(res)]
# add "self"
if where_from[0]: # dia
# res.append((self.string2[index_col], self.string1[index_row]))
res[0] += self.string1[index_col]
res[1] += self.string2[index_row]
res[2].append(index_col)
res[3].append(index_row)
elif where_from[1]: # hor
# res.append((self.string2[index_col], '-'))
res[0] += self.string1[index_col]
res[1] += '-'
res[2].append(index_col)
elif where_from[2]: # vert
# res.append(('-', self.string1[index_row]))
res[0] += '-'
res[1] += self.string2[index_row]
res[3].append(index_row)
# go further in
rec_res = []
next_traces = self.traceback_matrix[index_row][index_col]
if next_traces[0]:
# print('go dia')
res1 = res.copy()
where = (True, False, False)
rec_res += self.find_traces(index_row - 1, index_col - 1, res1, where)
if next_traces[1]:
# print('go left')
res2 = res.copy()
where = (False, True, False)
rec_res += self.find_traces(index_row, index_col - 1, res2, where)
if next_traces[2]:
# print('go up')
res3 = res.copy()
where = (False, False, True)
rec_res += self.find_traces(index_row - 1, index_col, res3, where)
return rec_res
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.has_align
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
return self.alignments[0][:2]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
string_to_check = self.string1
else: # string_number == 2
string_to_check = self.string2
letter = string_to_check[residue_index]
if letter not in self.alignments[0][string_number - 1]:
return False
        return residue_index in self.alignments[0][string_number + 1]
<file_sep>import numpy as np
import json
from itertools import compress
import re
from collections import Counter
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
# Get all possible words
POSSIBLE_WORDS = []
for i in range(20):
for j in range(20):
for k in range(20):
POSSIBLE_WORDS.append(INT_TO_AA[i] + INT_TO_AA[j] + INT_TO_AA[k])
w = 3
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.sequences = []
self.cache = {}
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
self.sequences.append(sequence)
        # Index every length-w word (w = 3); the range must be len - w + 1,
        # otherwise the last slices are shorter than w.
        for i in range(len(sequence) - w + 1):
            word = sequence[i:i+w]
            self.cache.setdefault(word, set()).add(sequence)
def get_sequences(self, word, as_list=True):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
# if word not in self.cache:
# self.cache[word] = list(compress(self.sequences, [word in sequence for sequence in self.sequences]))
if as_list:
return list(compress(self.sequences, [word in sequence for sequence in self.sequences]))
else:
return self.cache[word]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
all_words = set()
words_per_sequence = []
word_counts = Counter()
for i_seq, sequence in enumerate(self.sequences):
words_per_sequence.append(set([sequence[i: i+w] for i in range(len(sequence) - w + 1)]))
for word in words_per_sequence[i_seq]:
word_counts[word] += 1
all_words.update(words_per_sequence[i_seq])
# Average number of words per sequence
avg_words_per_seq = [len(word_list) for word_list in words_per_sequence]
avg_words_per_seq = round(sum(avg_words_per_seq) / len(avg_words_per_seq))
# Average number of sequences containing a word
avg_seqs_per_word = round(sum([word_counts[key] for key in word_counts]) / len(word_counts))
return tuple([len(self.sequences),
len(all_words),
avg_words_per_seq,
avg_seqs_per_word])
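# Usage sketch for BlastDb (illustrative, made-up sequence and word):
#
#   db = BlastDb()
#   db.add_sequence("MGPRARPAFLLLMLLQTAVL")
#   db.get_sequences("RAR")   # -> all stored sequences containing "RAR"
#   db.get_db_stats()         # -> (#sequences, #words, words/seq, seqs/word)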
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.sub_matrix = substitution_matrix
def get_words(self, *, sequence=None, pssm=None, T=11, verbose=False):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
:param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
words_verbose = {}
if pssm is None:
seq_len = len(sequence)
seq = sequence
is_pssm = False
else:
seq_len = len(pssm)
seq = pssm
is_pssm = True
# Calculate alignment scores for all words at all positions
for i in range(seq_len - w + 1):
for word in POSSIBLE_WORDS:
# Calculate score
score = 0
for j in range(len(word)):
score += self.get_score(word[j], seq, i+j, is_pssm)
if score >= T:
if i in words_verbose:
words_verbose[i].append(word)
else:
words_verbose[i] = [word]
if verbose:
return words_verbose
else:
words = []
for key in words_verbose:
words.extend([word for word in words_verbose[key]])
return list(set(words))
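    # Sketch of the search above: for the query window starting at each
    # position i, every one of the 8000 possible 3-mers is scored against the
    # query residues at i, i+1, i+2 (substitution-matrix scores, or PSSM
    # column scores when a PSSM is given); 3-mers reaching T become seeds.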
def search_one_hit(self, blast_db, *, query=None, pssm=None, T, X, S):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
results = {}
candidates = self.get_words(sequence=query, pssm=pssm, T=T, verbose=True)
for i_q, words in candidates.items():
for word in words:
targets = blast_db.get_sequences(word, as_list=False)
for target in targets:
# Find where the word appears
target_indices = [m.start() for m in re.finditer('(?=' + word + ')', target)]
for i_t in target_indices:
if pssm is None:
hsp = self.get_hsp_one_hit(query, i_q, target, i_t, False, S, X)
else:
hsp = self.get_hsp_one_hit(pssm, i_q, target, i_t, True, S, X)
if hsp is not None:
if target in results:
if hsp not in results[target]:
results[target].append(hsp)
else:
results[target] = [hsp]
return results
def get_score(self, aa, query, index, is_pssm):
if is_pssm:
score = query[index][AA_TO_INT[aa]]
else:
score = self.sub_matrix[AA_TO_INT[query[index]]][AA_TO_INT[aa]]
return score
def get_hsp_one_hit(self, query, i_q, target, i_t, is_pssm, s, x):
score = 0
init_i_q = i_q
init_i_t = i_t
start_q = i_q
start_t = i_t
stop_q = i_q + 2
# Get score for word
for i in range(3):
score += self.get_score(target[init_i_t+i], query, init_i_q+i, is_pssm)
best_score = score
i_q = init_i_q - 1
i_t = init_i_t - 1
# Move to left
while i_q >= 0 and i_t >= 0:
score += self.get_score(target[i_t], query, i_q, is_pssm)
if score > best_score:
best_score = score
start_q = i_q
start_t = i_t
if best_score - score >= x:
break
i_q -= 1
i_t -= 1
i_q = init_i_q + 3
i_t = init_i_t + 3
score = best_score
# Move to right
while i_q < len(query) and i_t < len(target):
score += self.get_score(target[i_t], query, i_q, is_pssm)
if score > best_score:
best_score = score
stop_q = i_q
if best_score - score >= x:
break
i_q += 1
i_t += 1
if best_score >= s:
return tuple((start_q, start_t, stop_q - start_q + 1, int(best_score)))
else:
return None
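    # Extension sketch (X-dropoff): starting from the 3-mer hit the HSP is
    # extended left, then right; each direction stops once the cumulative
    # score falls X or more below the best cumulative score seen so far, and
    # the best-scoring endpoints are kept. E.g. with X = 5, cumulative scores
    # 7, 9, 6, 3 stop the extension at the 3 (9 - 3 >= 5), keeping the
    # endpoint that scored 9. (Illustrative numbers only.)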
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
q_candidates = self.get_words(sequence=query, pssm=pssm, T=T, verbose=True)
if pssm is None:
len_query = len(query)
else:
len_query = len(pssm)
hits = self.get_hits(q_candidates, len_query, blast_db)
results = self.get_results(len_query, hits, A, S, X, query, pssm)
return results
    def get_results(self, len_query, hits, A, S, X, query=None, pssm=None):
results = {}
for target in hits:
len_target = len(target)
results[target] = []
            # Iterate over all diagonals of the hit matrix
for i_diag in range(-len_target + 1, len_query):
diag = np.diagonal(hits[target], i_diag)
indices = list(np.where(diag==1))[0]
len_indices = len(indices)
i_index_l = 0
i_r = 1
right_border = -1
while i_index_l < len_indices - 1:
hl = indices[i_index_l]
hr = indices[i_index_l + i_r]
if (hr - hl > 2) and (hr - hl <= A):
candidate = (hl + max(0, i_diag), hl - min(0, i_diag),
hr + max(0, i_diag),
hr - min(0, i_diag)) # (i_q_hl, i_t_hl, i_q_hr, i_t_hr)
if pssm is None:
hsp = self.get_hsp_two_hit(query, candidate[2], target, candidate[3], False, S, X)
else:
hsp = self.get_hsp_two_hit(pssm, candidate[2], target, candidate[3], True, S, X)
# If HSP is valid
if hsp[0] <= (candidate[0] + 2):
is_ok = hsp[0] > right_border
right_border = hsp[0] + hsp[2]
# Add to results if score is okay
if is_ok and hsp[3] >= S:
results[target].append(hsp)
if (i_index_l + i_r) >= len(indices) - 1:
i_index_l += 1
i_r = 1
else:
i_r += 1
results = {k:v for k,v in results.items() if v}
return results
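    # Two-hit heuristic sketch: on each diagonal of the hit matrix, a pair of
    # non-overlapping word hits (hr - hl > 2) at most A positions apart
    # triggers an ungapped extension from the right hit; the resulting HSP is
    # kept only if it reaches back to the left hit and scores at least S.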
def get_hits(self, q_candidates, len_query, blast_db):
hits = {}
for i_q, words in q_candidates.items():
# print(str(i_q) + '/' + str(len_query))
for word in words:
targets = blast_db.get_sequences(word, as_list=False)
for target in targets:
len_target = len(target)
# indices = [m.start() for m in re.finditer('(?=' + word + ')', target)]
indices = list(find_all(target, word))
if target not in hits:
hits[target] = np.zeros((len_target, len_query))
hits[target][indices, i_q] = 1
return hits
def get_hsp_two_hit(self, query, i_q, target, i_t, is_pssm, s, x):
score = 0
best_score = -1000000
init_i_q = i_q
init_i_t = i_t
start_q = i_q
start_t = i_t
stop_q = i_q + 2
stop_t = i_t + 2
# Get score for word
for i in range(3):
score += self.get_score(target[init_i_t + i], query, init_i_q + i, is_pssm)
best_score = score
i_q = init_i_q - 1
i_t = init_i_t - 1
# Move to left
while i_q >= 0 and i_t >= 0:
score += self.get_score(target[i_t], query, i_q, is_pssm)
if score > best_score:
best_score = score
start_q = i_q
start_t = i_t
if best_score - score >= x:
break
i_q -= 1
i_t -= 1
i_q = init_i_q + 3
i_t = init_i_t + 3
score = best_score
# Move to right
while i_q < len(query) and i_t < len(target):
score += self.get_score(target[i_t], query, i_q, is_pssm)
if score > best_score:
best_score = score
stop_q = i_q
if best_score - score >= x:
break
i_q += 1
i_t += 1
# if best_score < s:
# return None
# else:
return tuple((start_q, start_t, stop_q - start_q + 1, int(best_score)))
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
start += 1
def compare_blast_results(blast_results, results):
passed_1 = (len(blast_results) == len(results))
passed_2 = (set(blast_results) == set(results))
passed = (passed_1 and passed_2)
# assert passed, 'Incorrect target sequences returned.'
for target, hsp_list in results.items():
blast_hsp_list = blast_results[target]
passed_1 = (len(blast_hsp_list) == len(hsp_list))
passed_2 = (set(blast_hsp_list) == set(hsp_list))
passed = (passed_1 and passed_2)
assert passed, 'Incorrect HSPs returned.'
def table_list_tuple(data):
for key, value in data.items():
data[key] = [tuple(x) for x in value]
return data
def main():
with open('tests/blast_test.json') as f:
data = json.load(f)
blast_db = BlastDb()
# blast_db.add_sequence('MVEAIVEFDYQAQHDDELTISVGEVITNIRKEDGGWWEGQINGRRGLFPDNFVREIKKDVKKDLLSNKAPEKPMHDVSSGNSLLSSETILRTNKRGERRRRRCQVAFSYLPQNDDELELKVGDIIEVVGEVEEGWWEGVLNGKTGMFPSNFIKELSGESDELGISQDEQLSKSRPEGFLPASLLPFPAHGAKGKTTFEGTILYRAAPGKTEGHRRYYSLRETTGSESDGGDSSSTKSEGANGTVATAAIQPKKVKGVGFGDIFKDKPIKLRPRSIEVENDFLPVEKTIGKKLPPATSTPDPSKTEMDSRTKTKDYCKVIFPYEAQNDDELTIKEGDIVTLINKDCIDVGWWEGELNGRRGVFPDNFVKLLPSDFDKEGNRPKKPPPPSAPVIKQGAGTTERKHEIKKIPPERPETLPNRTEEKERPEREPKLDLQKPSVPAIPPKKPRPPKTNSLNRPGVLPPRRPERPVGPLTHTRGDSSKIDLAGSTLSGILDKDLSDRSNDIDLEGFDSVISSTEKLSHPTTSRPKATGRRPPSQSLTSSSLSSPDIFDSPSPEEDKEEHISLAHRGIDVSKKTSRTVTISQVSDNKASLPPKPGTMAAASSGPASLSSVASSPMSSSLGTAGQRASSPSLFSAEGKAKTESAVSSQAAIEELKMQVRELRTIIETMKDQQKREIKQLLSELDEEKKIRLRLQMEVNDIKKALQSK')
# blast_db.add_sequence('MQKAIRLNDGHVVSLGLLAQRDGTRKGYLSKRSS<KEY>SLYEASLRIEPKLPT')
for seq in data['db_sequences'][:]:
blast_db.add_sequence(seq)
# get_seq = blast_db.get_sequences('DEF')
#
# stats = blast_db.get_db_stats()
blast = Blast(data['sub_matrix'])
# words = blast.get_words(sequence=data['query_seq'], T=13)
# blast.get_words(pssm=data['query_pssm'], T=11)
# blast.get_hsp('MGPRARPAFLLLMLLQTAVL', 7, 'MGELMAFLLPLIIVLMVKHS', 6, False, 20, 5)
results = blast.search_two_hit(blast_db,
query=data['query_seq'],
T=11,
X=5,
S=30,
A=40)
truth = table_list_tuple(data['blast_hsp_two_hit_1'])
counter = 0
for target in results:
for hsp in results[target]:
counter += 1
diff = [x for x in truth if (x not in results)]
for target in results:
if set(results[target]) != set(truth[target]):
print(target)
print(truth[target])
print(list(results[target]))
compare_blast_results(truth, results)
pass
if __name__ == '__main__':
main()
<file_sep>##############
# Exercise 2.7
##############
amino_acid_props = {
    "A": ["aliphatic", "nonpolar", "neutral"],
    "R": ["basic", "polar", "positive"],
    "N": ["amide", "polar", "neutral"],
    "D": ["acid", "polar", "negative"],
    "C": ["sulfur", "nonpolar", "neutral"],
    "Q": ["amide", "polar", "neutral"],
    "E": ["acid", "polar", "negative"],
    "G": ["aliphatic", "nonpolar", "neutral"],
    "H": ["basic", "aromatic", "polar", "neutral", "positive"],
    "I": ["aliphatic", "nonpolar", "neutral", "hydrophobic"],
    "L": ["aliphatic", "nonpolar", "neutral", "hydrophobic"],
    "K": ["basic", "polar", "positive"],
    "M": ["sulfur", "nonpolar", "neutral", "hydrophobic"],
    "F": ["aromatic", "nonpolar", "neutral", "hydrophobic"],
    "P": ["cyclic", "nonpolar", "neutral", "hydrophobic"],
    "S": ["hydroxyl", "polar", "neutral"],
    "T": ["hydroxyl", "polar", "neutral"],
    "W": ["aromatic", "nonpolar", "neutral", "hydrophobic"],
    "Y": ["aromatic", "polar", "neutral"],
    "V": ["aliphatic", "nonpolar", "neutral", "hydrophobic"]
}
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return "positive" in amino_acid_props[aa]
def isNegativelyCharged(aa):
    return "negative" in amino_acid_props[aa]
def isAromatic(aa):
    return "aromatic" in amino_acid_props[aa]
def isPolar(aa):
    return "polar" in amino_acid_props[aa]
def containsSulfur(aa):
    return "sulfur" in amino_acid_props[aa]
def isAcid(aa):
    return "acid" in amino_acid_props[aa]
def isBasic(aa):
    return "basic" in amino_acid_props[aa]
def isProline(aa):
    return aa in ("p", "P")
def isHydrophobic(aa):
    return "hydrophobic" in amino_acid_props[aa] or "nonpolar" in amino_acid_props[aa]
<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
    return aa in {"K", "R", "H"}
def isNegativelyCharged(aa):
    return aa in {"D", "E"}
def isHydrophobic(aa):
    return aa in {"V", "I", "L", "F", "W", "Y", "M", "A"}
# Histidine's imidazole ring is commonly counted as aromatic
def isAromatic(aa):
    return aa in {"F", "W", "Y", "H"}
# Charged residues (K, H, R, D, E) are included among the polar ones
def isPolar(aa):
    return aa in {"N", "Q", "S", "T", "Y", "K", "H", "R", "D", "E"}
def isProline(aa):
    return aa == "P"
def containsSulfur(aa):
    return aa in {"C", "M"}
def isAcid(aa):
    return aa in {"D", "E"}
def isBasic(aa):
    return aa in {"R", "K", "H"}
<file_sep>##############
# Exercise 2.6
##############
from collections import Counter
class AADist:
"""
The class provides a method to read fasta files and to calculate certain statistics on the read sequences.
"""
def __init__(self, filepath):
self.__sequences = []
self.read_fasta(filepath)
def get_counts(self):
return len(self.__sequences)
    def get_average_length(self):
        total = 0
        for seq in self.__sequences:
            total += len(seq)
        return total / len(self.__sequences)
def read_fasta(self, path):
with open(path, "r") as f:
seq = ""
sequence_started = False
for line in f:
if line.startswith(">") or line.startswith(";"):
if sequence_started:
self.__sequences.append(seq.strip('*'))
seq = ""
sequence_started = False
continue
sequence_started = True
seq += line.strip()
self.__sequences.append(seq.strip('*'))
def get_abs_frequencies(self):
# return number of occurences not normalized by length
prot=""
for seq in self.__sequences:
prot=prot+seq
return Counter(prot)
def get_av_frequencies(self):
# return number of occurences normalized by length
prot=""
for seq in self.__sequences:
prot=prot+seq
counted= Counter(prot)
for item in counted:
counted[item] /=len(prot)
return counted
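# Usage sketch (hypothetical file path, not shipped with this code):
#
#   dist = AADist("tests/sample.fasta")
#   dist.get_counts()           # number of sequences read
#   dist.get_average_length()   # mean sequence length
#   dist.get_av_frequencies()   # per-residue relative frequencies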
<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.i = 0
self.j = 0
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def S(self, i, j):
a = self.string2[i - 1]
b = self.string1[j - 1]
return self.substitution_matrix[a][b]
def F(self, i, j):
return self.score_matrix[i][j]
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
for i in range(1, len(self.string2) + 1):
for j in range(1, len(self.string1) + 1):
match = self.F(i - 1, j - 1) + self.S(i, j)
delete = self.F(i - 1, j) + self.gap_penalty
insert = self.F(i, j - 1) + self.gap_penalty
self.score_matrix[i][j] = max(match, delete, insert, 0)
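    # Smith-Waterman recurrence illustrated: each cell takes
    #   F(i, j) = max(F(i-1, j-1) + S(i, j), F(i-1, j) + gap, F(i, j-1) + gap, 0)
    # e.g. F(i-1, j-1) = 4, substitution score S(i, j) = 5, gap = -6 gives
    # max(9, F(i-1, j) - 6, F(i, j-1) - 6, 0) = 9 when the neighbors are small.
    # (Illustrative numbers; the zero floor is what makes the alignment local.)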
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return np.amax(self.score_matrix) > 0
    def stop(self, i, j):
        top = self.F(i - 2, j)
        left = self.F(i, j - 2)
        return min(top, left) == 0
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
result = np.where(self.score_matrix == np.amax(self.score_matrix))
(i, j) = list(zip(result[0], result[1]))[0]
if self.F(i, j) == 0:
return "", ""
A = self.string2[i - 1]
B = self.string1[j - 1]
while i > 0 and j > 0:
top_left = self.F(i - 1, j - 1)
top = self.F(i - 1, j)
left = self.F(i, j - 1)
if top_left > top and top_left > left:
A += self.string2[i - 2]
B += self.string1[j - 2]
i -= 1
j -= 1
continue
if self.stop(i, j):
break
if top > top_left and top > left and top != 0:
A += self.string2[i - 2]
B += "-"
i -= 1
else:
A += "-"
B += self.string1[j - 2]
j -= 1
self.i = i - 2
self.j = j - 2
A = A[::-1]
B = B[::-1]
return B, A
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
(a, b) = self.get_alignment()
if string_number == 1:
s = self.j + 1
e = self.j + len(a)
return bool(s <= residue_index <= e)
if string_number == 2:
s = self.i + 1
e = self.i + len(b)
return bool(s <= residue_index <= e)
return False
<file_sep>##############
# Exercise 2.7
##############
amino_acid_properties={
'A': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'R': {"pc":True,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":False,"basic":True},
'N': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":False,"basic":False},
'D': {"pc":False,"nc":True,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":True,"basic":False},
'C': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":True,"acid":False,"basic":False},
'E': {"pc":False,"nc":True,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":True,"basic":False},
'Q': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":False,"basic":False},
'G': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'H': {"pc":True,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":True,"polar":True,"hassulfur":False,"acid":False,"basic":True},
'I': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'L': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'K': {"pc":True,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":False,"basic":True},
'M': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":True,"acid":False,"basic":False},
'F': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":True,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'P': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'S': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":False,"basic":False},
'T': {"pc":False,"nc":False,"hiphobic":False,"hiphilic":True,"aromatic":False,"polar":True,"hassulfur":False,"acid":False,"basic":False},
'W': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":True,"polar":False,"hassulfur":False,"acid":False,"basic":False},
'Y': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":True,"polar":True,"hassulfur":False,"acid":False,"basic":False},
'V': {"pc":False,"nc":False,"hiphobic":True,"hiphilic":False,"aromatic":False,"polar":False,"hassulfur":False,"acid":False,"basic":False},
}
def isCharged(aa):
return isPositivelyCharged(aa) or isNegativelyCharged(aa)
def isPositivelyCharged(aa):
return amino_acid_properties[aa]["pc"]
def isNegativelyCharged(aa):
return amino_acid_properties[aa]["nc"]
def isHydrophobic(aa):
return amino_acid_properties[aa]["hiphobic"]
def isAromatic(aa):
return amino_acid_properties[aa]["aromatic"]
def isPolar(aa):
return amino_acid_properties[aa]["polar"]
def isProline(aa):
return aa=="P"
def containsSulfur(aa):
return amino_acid_properties[aa]["hassulfur"]
def isAcid(aa):
return amino_acid_properties[aa]["acid"]
def isBasic(aa):
return amino_acid_properties[aa]["basic"]<file_sep>import numpy as np
class LocalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
self.substitution_matrix = matrix
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
        self.max_score = self.find_max()
        # Initialize the alignment boundaries before calling get_alignment(),
        # which sets them via translate_tuples(); assigning zeros afterwards
        # would overwrite the computed start/end positions.
        self.start1 = 0
        self.start2 = 0
        self.end1 = 0
        self.end2 = 0
        self.alignment = self.get_alignment()
def align(self):
"""
Align given strings using the Smith-Waterman algorithm.
NB: score matrix and the substitution matrix are different matrices!
"""
        # First row and column stay zero for local alignment
rows = len(self.string2) + 1
columns = len(self.string1) + 1
# Fill consecutive cells
for m in range(1, rows):
for n in range(1, columns):
sub = max(self.score_matrix[m - 1][n] + self.gap_penalty,
self.score_matrix[m][n - 1] + self.gap_penalty)
sub = max(sub, self.score_matrix[m - 1][n - 1] +
self.substitution_matrix[self.string1[n - 1]][self.string2[m - 1]])
sub = max(sub, 0)
self.score_matrix[m][n] = sub
def find_max(self):
ret = (0, 0)
tmp = self.score_matrix[0][0]
rows = len(self.string2) + 1
columns = len(self.string1) + 1
for x in range(0, rows):
for y in range(0, columns):
if self.score_matrix[x][y] > tmp:
tmp = self.score_matrix[x][y]
ret = (x, y)
return ret
def has_alignment(self):
"""
:return: True if a local alignment has been found, False otherwise
"""
return self.max_score != (0, 0)
def get_alignment(self):
"""
:return: alignment represented as a tuple of aligned strings
"""
if self.max_score == (0, 0):
return "", ""
x = self.max_score[0]
y = self.max_score[1]
root = AlignmentTreeNode(self.score_matrix[x][y], x, y, self.string1, self.string2, self.score_matrix,
self.substitution_matrix, self.gap_penalty)
root.find_children()
ret = root.get_alignments()
return self.translate_tuples(ret)
def translate_tuples(self, tuples):
ret1 = ''
ret2 = ''
self.start1 = tuples[len(tuples) - 1][2] - 1
self.end1 = tuples[0][2] - 1
self.start2 = tuples[len(tuples) - 1][3] - 1
self.end2 = tuples[0][3] - 1
for t in tuples:
ret1 += t[0]
ret2 += t[1]
return ret1[::-1], ret2[::-1]
def is_residue_aligned(self, string_number, residue_index):
"""
:param string_number: number of the string (1 for string1, 2 for string2) to check
:param residue_index: index of the residue to check
        :return: True if the residue with a given index in a given string has been aligned
False otherwise
"""
if string_number == 1:
            return self.start1 <= residue_index <= self.end1
elif string_number == 2:
            return self.start2 <= residue_index <= self.end2
else:
return False
class AlignmentTreeNode:
left_child = None
upper_child = None
diagonal_child = None
def __init__(self, value, x, y, string1, string2, score_matrix, subs_matrix, gap_penalty):
self.value = value
self.x = x
self.y = y
self.string1 = string1
self.string2 = string2
self.score_matrix = score_matrix
self.subs_matrix = subs_matrix
self.gap_penalty = gap_penalty
def find_children(self):
try:
self.left_child = AlignmentTreeNode(self.score_matrix[self.x][self.y - 1], self.x, self.y - 1,
self.string1, self.string2, self.score_matrix,
self.subs_matrix, self.gap_penalty)
except IndexError:
self.left_child = None
try:
self.upper_child = AlignmentTreeNode(self.score_matrix[self.x - 1][self.y], self.x - 1, self.y,
self.string1, self.string2, self.score_matrix,
self.subs_matrix, self.gap_penalty)
except IndexError:
self.upper_child = None
try:
self.diagonal_child = AlignmentTreeNode(self.score_matrix[self.x - 1][self.y - 1], self.x - 1, self.y - 1,
self.string1, self.string2, self.score_matrix,
self.subs_matrix, self.gap_penalty)
except IndexError:
self.diagonal_child = None
def get_left_child(self):
return self.left_child
def get_upper_child(self):
return self.upper_child
def get_diagonal_child(self):
return self.diagonal_child
def get_value(self):
return self.value
def get_string_match(self):
return self.subs_matrix[self.string1[self.y - 1]][self.string2[self.x - 1]]
def get_alignments(self):
if self.x <= 0 or self.y <= 0 or self.value == 0:
ret = []
else:
ret = [(self.string1[self.y - 1], self.string2[self.x - 1], self.y, self.x)]
self.find_children()
if self.value == self.get_diagonal_child().get_value() \
+ self.get_string_match():
ret += self.get_diagonal_child().get_alignments()
elif self.value == self.get_left_child().get_value() + self.gap_penalty:
ret[0] = (ret[0][0], '-', self.y, self.x)
ret += self.get_left_child().get_alignments()
elif self.value == self.get_upper_child().get_value() + self.gap_penalty:
ret[0] = ('-', ret[0][1], self.y, self.x)
ret += self.get_upper_child().get_alignments()
return ret
<file_sep>import numpy as np
import math
from pathlib import Path
"""
ATTENTION: Use the following dictionaries to get the correct index for each
amino acid when accessing any type of matrix (PSSM or substitution
matrix) parameters. Failure to do so will most likely result in not
passing the tests.
"""
ALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
AA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}
INT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}
db_dict={}
class BlastDb:
def __init__(self):
"""
Initialize the BlastDb class.
"""
self.db = []
self.db_seq={}
def add_sequence(self, sequence):
"""
Add a sequence to the database.
:param sequence: a protein sequence (string).
"""
        self.db.append(sequence)
def get_sequences(self, word):
"""
Return all sequences in the database containing a given word.
:param word: a word (string).
:return: List with sequences.
"""
        return [seq for seq in self.db if word in seq]
def get_db_stats(self):
"""
Return some database statistics:
- Number of sequences in database
- Number of different words in database
- Average number of words per sequence (rounded to nearest int)
- Average number of sequences per word (rounded to nearest int)
        :return: Tuple with four integer numbers corresponding to the mentioned
statistics (in order of listing above).
"""
        number_of_sequences = len(self.db)
        # Build word -> sequences and sequence -> words maps (unique 3-mers;
        # range len - 2 covers every full-length word, including the last one)
        for seq in self.db:
            words_in_seq = {seq[j:j+3] for j in range(len(seq) - 2)}
            self.db_seq[seq] = words_in_seq
            for word in words_in_seq:
                db_dict.setdefault(word, set()).add(seq)
        number_of_words = len(db_dict)
        average_words = round(np.mean([len(words) for words in self.db_seq.values()]))
        average_sequences = round(np.mean([len(seqs) for seqs in db_dict.values()]))
        return (number_of_sequences, number_of_words, average_words, average_sequences)
class Blast:
def __init__(self, substitution_matrix):
"""
Initialize the Blast class with the given substitution_matrix.
:param substitution_matrix: 20x20 amino acid substitution score matrix.
"""
self.substitution_matrix=substitution_matrix
pass
def get_words(self, *, sequence=None, pssm=None, T=11):
"""
Return all words with score >= T for given protein sequence or PSSM.
Only a sequence or PSSM will be provided, not both at the same time.
A word may only appear once in the list.
        :param sequence: a protein sequence (string).
:param pssm: a PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:return: List of unique words.
"""
        # Per-position score for placing amino acid `a` at offset k of the 3-mer
        # starting at position j of the query (sequence or PSSM).
        if sequence is not None:
            starts = range(len(sequence) - 2)

            def pos_score(j, k, a):
                return self.substitution_matrix[AA_TO_INT[a]][AA_TO_INT[sequence[j + k]]]
        elif pssm is not None:
            starts = range(len(pssm) - 2)

            def pos_score(j, k, a):
                return pssm[j + k][AA_TO_INT[a]]
        else:
            return []
        words = set()
        for j in starts:
            for a in ALPHABET:
                s0 = pos_score(j, 0, a)
                for b in ALPHABET:
                    s1 = s0 + pos_score(j, 1, b)
                    for c in ALPHABET:
                        if s1 + pos_score(j, 2, c) >= T:
                            words.add(a + b + c)
        return list(words)
def search_one_hit(self, blast_db, *, query=None, pssm=None, T=13, X=5, S=30):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # NOTE: hit extension is not implemented here; this returns a placeholder HSP.
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
def search_two_hit(self, blast_db, *, query=None, pssm=None, T=11, X=5, S=30, A=40):
"""
Search a database for target sequences with a given query sequence or
PSSM. Return a dictionary where the keys are the target sequences for
which HSPs have been found and the corresponding values are lists of
tuples. Each tuple is a HSP with the following elements (and order):
- Start position of HSP in query sequence
- Start position of HSP in target sequence
- Length of the HSP
- Total score of the HSP
        The same HSP may not appear twice in the list (remove duplicates).
Only a sequence or PSSM will be provided, not both at the same time.
:param blast_db: BlastDB class object with protein sequences.
:param query: query protein sequence.
:param pssm: query PSSM (Lx20 matrix, where L is length of sequence).
:param T: score threshold T for the words.
:param X: drop-off threshold X during extension.
:param S: score threshold S for the HSP.
:param A: max distance A between two hits for the two-hit method.
:return: dictionary of target sequences and list of HSP tuples.
"""
        # NOTE: two-hit extension is not implemented here; this returns a placeholder HSP.
        d = dict()
        d['SEQWENCE'] = [(1, 2, 4, 13)]
        return d
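
# Illustrative sketch (toy matrix and made-up sequences, NOT the exercise's BLOSUM
# fixtures or test data): exercises the BlastDb bookkeeping and word generation above.
def _demo_blast():
    db = BlastDb()
    for seq in ('MGNAAAAKK', 'MGAAKKGNA'):
        db.add_sequence(seq)
    print(db.get_sequences('AAA'))   # -> ['MGNAAAAKK']
    print(db.get_db_stats())         # (#seqs, #distinct words, avg words/seq, avg seqs/word)
    toy = [[5 if i == j else -1 for j in range(20)] for i in range(20)]  # hypothetical scores
    print(Blast(toy).get_words(sequence='MGNA', T=13))  # only exact seeds score 15 >= 13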
<file_sep>import os
import inspect
##############
# Exercise 2.5
##############
dictionary = {
"TTT": "F", "TCT": "S", "TAT": "Y", "TGT": "C",
"TTC": "F", "TCC": "S", "TAC": "Y", "TGC": "C",
"TTA": "L", "TCA": "S", "TAA": "_", "TGA": "_",
"TTG": "L", "TCG": "S", "TAG": "_", "TGG": "W",
"CTT": "L", "CCT": "P", "CAT": "H", "CGT": "R",
"CTC": "L", "CCC": "P", "CAC": "H", "CGC": "R",
"CTA": "L", "CCA": "P", "CAA": "Q", "CGA": "R",
"CTG": "L", "CCG": "P", "CAG": "Q", "CGG": "R",
"ATT": "I", "ACT": "T", "AAT": "N", "AGT": "S",
"ATC": "I", "ACC": "T", "AAC": "N", "AGC": "S",
"ATA": "I", "ACA": "T", "AAA": "K", "AGA": "R",
"ATG": "M", "ACG": "T", "AAG": "K", "AGG": "R",
"GTT": "V", "GCT": "A", "GAT": "D", "GGT": "G",
"GTC": "V", "GCC": "A", "GAC": "D", "GGC": "G",
"GTA": "V", "GCA": "A", "GAA": "E", "GGA": "G",
"GTG": "V", "GCG": "A", "GAG": "E", "GGG": "G",
}
complements = {
"A": "T",
"T": "A",
"G": "C",
"C": "G",
}
def codons_to_aa(orf):
if len(orf) % 3 != 0:
return None
codons = [orf[i:i+3] for i in range(0, len(orf), 3)]
aa_seq = ''.join(dictionary[c] for c in codons)
return aa_seq
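
# e.g. codons_to_aa('ATGGGTTAA') -> 'MG_' (illustrative)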
def my_codons_to_aa(orf):
return ''.join(dictionary[c] for c in orf)
def get_triplets(sequence):
return [sequence[i:i+3] for i in range(0, len(sequence), 3)]
def complementary(sequence):
return "".join(complements[char] for char in sequence)
def rev_comp(primary):
return ''.join(reversed(complementary(primary)))
def isDNA(sequence):
valid_dna = "ACGT"
return all(char in valid_dna for char in sequence)
def isStop(orf):
if orf == "TAA" or orf == "TAG" or orf == "TGA":
return True
return False
def shift(result, sequence, boolean):
length = len(sequence)
sequence = sequence + sequence
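    # doubling the sequence lets ORFs wrap around the origin (circular genome);
    # positions are mapped back into range with `% length` when the ORF is recorded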
initial = -1
for shift in range(0, 3):
for index in range(shift, len(sequence), 3):
orf = sequence[index:index+3]
if isStop(orf) and initial != -1:
end = index
if (end - initial) > 99:
protein = my_codons_to_aa(get_triplets(sequence[initial:end]))
start, fin = initial, end+2
if boolean:
start = abs(initial-len(sequence)) - 1
fin = abs(end+2-len(sequence)) - 1
result.append((start % length, fin % length, protein, boolean))
initial = -1
continue
if orf == "ATG" and initial == -1:
initial = index
    # If two ORFs end at the same position, keep only the longest one.
    for orf in result.copy():
        for other in result.copy():
            if other is not orf and other[1] == orf[1] \
                    and len(orf[2]) > len(other[2]) and other in result:
                result.remove(other)
    return result
def get_orfs(sequence):
if not isDNA(sequence):
raise TypeError("Text is not DNA sequence")
results = []
shift(results, sequence, False)
shift(results, rev_comp(sequence), True)
result = set(results)
return list(result)
# # # You can use the supplied test cases for your own testing. Good luck!
# def read_genome(file):
# lines = open(file, "r")
# genome = ""
#
# for line in lines:
# genome += line.strip()
#
# lines.close()
# return genome
#
# # Get relative path to script
# text1 = os.path.dirname(__file__) + "tests/genome.txt"
# text2 = os.path.dirname(__file__) + "tests/genome2.txt"
#
# genome1 = read_genome(text1)
# genome1 = genome1.upper()
#
# genome2 = read_genome(text2)
# genome2 = genome2.upper()
#
# print(len(genome2))
#
# for idx, val in enumerate(get_orfs(genome2)):
# print(idx, val)<file_sep>##############
# Exercise 2.7
##############
def isCharged(aa):
    return isPositivelyCharged(aa) or isNegativelyCharged(aa)

def isPositivelyCharged(aa):
    return aa in {'R', 'K', 'H'}

def isNegativelyCharged(aa):
    return aa in {'D', 'E'}

def isHydrophobic(aa):
    return aa in {'A', 'F', 'I', 'L', 'M', 'V', 'W', 'Y'}

def isAromatic(aa):
    return aa in {'H', 'F', 'Y', 'W'}

def isPolar(aa):
    return aa in {'R', 'N', 'D', 'Q', 'E', 'H', 'K', 'S', 'T', 'Y'}

def isProline(aa):
    return aa == 'P'

def containsSulfur(aa):
    return aa in {'C', 'M'}

def isAcid(aa):
    return aa in {'D', 'E'}

def isBasic(aa):
    return aa in {'R', 'H', 'K'}<file_sep>import numpy as np
from tests.matrices import MATRICES
class GlobalAlignment:
def __init__(self, string1, string2, gap_penalty, matrix):
"""
:param string1: first string to be aligned, string
:param string2: second string to be aligned, string
:param gap_penalty: gap penalty, integer
:param matrix: substitution matrix containing scores for amino acid
matches and mismatches, dict
Attention! string1 is used to index columns, string2 is used to index rows
"""
self.string1 = string1
self.string2 = string2
self.gap_penalty = gap_penalty
        self.substitution_matrix = matrix
        # np.int was removed in NumPy 1.24; plain int is the portable spelling
        self.score_matrix = np.zeros((len(string2) + 1, len(string1) + 1), dtype=int)
self.align()
def align(self):
"""
Align given strings using the Needleman-Wunsch algorithm,
store the alignments and the score matrix used to compute those alignments.
NB: score matrix and the substitution matrix are different matrices!
"""
len_string1 = len(self.string1) + 1
len_string2 = len(self.string2) + 1
self.score_matrix[0, :] = np.array([i * self.gap_penalty for i in range(len_string1)])
self.score_matrix[:, 0] = np.array([i * self.gap_penalty for i in range(len_string2)])
for i in range(1, len_string2):
for k in range(1, len_string1):
                match = self.score_matrix[i-1, k-1] + \
                        self.substitution_matrix[self.string2[i-1]][self.string1[k-1]]
delete = self.score_matrix[i-1, k] + self.gap_penalty
insert = self.score_matrix[i, k-1] + self.gap_penalty
self.score_matrix[i, k] = max(match, delete, insert)
def get_best_score(self):
"""
:return: the highest score for the aligned strings, int
"""
return self.score_matrix[-1, -1]
def get_number_of_alignments(self):
"""
:return: number of found alignments with the best score
"""
return len(self.get_alignments())
def get_alignments(self):
"""
:return: list of alignments, where each alignment is represented
as a tuple of aligned strings
"""
        i = len(self.string2)
        k = len(self.string1)
        stack = [('', '', i, k)]
alignments = []
while len(stack) > 0:
align_a, align_b, m, n = stack.pop()
if m == 0 and n == 0:
alignments.append((align_b, align_a))
continue
            if m > 0 and n > 0 and self.score_matrix[m, n] == self.score_matrix[m-1, n-1] + \
                    self.substitution_matrix[self.string2[m-1]][self.string1[n-1]]:
align_a_1 = self.string2[m-1] + align_a
align_b_1 = self.string1[n-1] + align_b
stack.append((align_a_1, align_b_1, m-1, n-1))
if n > 0 and self.score_matrix[m, n] == self.score_matrix[m, n-1] + self.gap_penalty:
align_a_2 = '-' + align_a
align_b_2 = self.string1[n-1] + align_b
stack.append((align_a_2, align_b_2, m, n-1))
if m > 0 and self.score_matrix[m, n] == self.score_matrix[m-1, n] + self.gap_penalty:
align_a_3 = self.string2[m-1] + align_a
align_b_3 = '-' + align_b
stack.append((align_a_3, align_b_3, m-1, n))
return alignments
def get_score_matrix(self):
"""
:return: matrix built during the alignment process as a list of lists
"""
return self.score_matrix
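
# Illustrative usage sketch (hypothetical +1/-1 scoring dict, not the MATRICES
# fixture imported above): builds a tiny substitution table and aligns two strings.
def _demo_global_alignment():
    alphabet = 'ACDEFGHIKLMNPQRSTVWY'
    simple = {a: {b: (1 if a == b else -1) for b in alphabet} for a in alphabet}
    ga = GlobalAlignment('AVNC', 'AVC', -2, simple)
    print(ga.get_best_score())   # 1 for this pair under the toy scoring
    print(ga.get_alignments())   # e.g. [('AVNC', 'AV-C')]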
<file_sep>tuples_list = []
def get_orfs(genome):
    genome = genome.upper()
    validate(genome)
    tuples_list.clear()  # the module-level list would otherwise accumulate across calls
    find_orf(genome, False)
    find_orf(genome, True)
    return tuples_list
def find_orf(genome, reversed_order):
length = len(genome)
if bool(reversed_order):
genome = complementary(genome[::-1])
start_position = 0
opened = False
for i in range(0, 3):
sub_sequence = ""
for j in range(i, length-2, 3):
code = get_code(genome[j:j+3])
if code in ['Opal', 'Ochre', 'Amber']:
opened = False
stop_position = j + 2
if len(sub_sequence) > 33:# and sub_sequence_length%3 == 0:
if bool(reversed_order):
start_position = len(genome) - start_position - 1
stop_position = len(genome) - stop_position - 1
tuples_list.append(tuple((start_position, stop_position, sub_sequence, start_position > stop_position)))
sub_sequence = ""
if opened:
sub_sequence += code
if code == "M" and not opened:
opened = True
sub_sequence = code
start_position = j
def validate(genome):
for c in genome:
if c not in 'AGCT':
raise TypeError('Invalid sequence')
def complementary(elements):
result = ""
for element in elements:
result += get_complement(element)
return result
def get_complement(value):
switcher = {
"G": "C",
"C": "G",
"T": "A",
"A": "T",
}
return switcher.get(value, "empty")
def get_code(value):
genetic_code = {'CCC': 'P', 'TTC': 'F', 'CTT': 'L', 'AAC': 'N', 'CGG': 'R', 'TGT': 'C',
'CTC': 'L', 'CCA': 'P', 'TCA': 'S', 'GCG': 'A', 'ATC': 'I', 'AGT': 'S',
'GTC': 'V', 'TGC': 'C', 'CAT': 'H', 'AAG': 'K', 'GAA': 'E', 'ACG': 'T',
'AGA': 'R', 'TAC': 'Y', 'TAA': 'Ochre', 'AGG': 'R', 'GAC': 'D', 'TGG': 'W',
'TCC': 'S', 'GGA': 'G', 'GCC': 'A', 'GGG': 'G', 'GGC': 'G', 'ACT': 'T', 'CGA':
'R', 'TTG': 'L', 'ACA': 'T', 'ACC': 'T', 'GAG': 'E', 'AAT': 'N', 'CCT': 'P',
'TAG': 'Amber', 'CGC': 'R', 'CTA': 'L', 'GAT': 'D', 'GGT': 'G', 'GTG': 'V',
'TGA': 'Opal', 'GCA': 'A', 'GCT': 'A', 'ATT': 'I', 'CTG': 'L', 'TCT': 'S',
'TAT': 'Y', 'GTA': 'V', 'TTA': 'L', 'CCG': 'P', 'AAA': 'K', 'ATA': 'I',
'TTT': 'F', 'ATG': 'M', 'AGC': 'S', 'CAA': 'Q', 'TCG': 'S', 'CAG': 'Q',
'CAC': 'H', 'CGT': 'R', 'GTT': 'V'}
return genetic_code.get(value, "empty")
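
# e.g. get_code('ATG') -> 'M', get_code('TGA') -> 'Opal' (illustrative)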
| d7428a18fa5c5416c3d773df6529b1c562705fe0 | [
"Text",
"Markdown",
"Shell",
"Python"
] | 397 | Text | annareithmeir/PlagiarismCheck_HiWi_Bioinformatik_SS19 | db80ed523d1fc6b1e07ec52c91e60595eae9a995 | d0b05a47f8d898d6351bb3c097c238780a39a19f |
refs/heads/master | <file_sep># quantum-in-jupyter
Review of basic quantum mechanics using an interactive Jupyter notebook. Covers:
- [x] Modelling (using finite-difference methods) and visualization of the eigenfunctions and wavefunctions of a quantum harmonic oscillator.
- [ ] Visualization of a gaussian wavepacket in a resonant tunneling diode (demonstration of quantum tunneling & reflection)
Based off of a projects I completed for the NE 232 course at the University of Waterloo (see .pdf/.tex)
| bf5847d180adaae7d5cf2e22184dee28996e42f2 | [
"Markdown"
] | 1 | Markdown | chrisw7/quantum-in-jupyter | 5a121c831810bcf089ff6ef465baf0ef72b32561 | 3696413b3bcb1eb91392b7c503ac5441ed20e67a |
refs/heads/main | <repo_name>diaz-alx/gabdiazportafolioso.github.io<file_sep>/README.md
# gabdiazportafolioso
This is portfolio website
| e4ec317271ac3e4fe63fe5bc2ef0c2f10e7b2e54 | [
"Markdown"
] | 1 | Markdown | diaz-alx/gabdiazportafolioso.github.io | 1fdb94ed6ed8c2ca87a049f30446a5c2883f79e0 | 792c1e5e601390ba9180e6212acd58c10f5af7ed |
refs/heads/master | <repo_name>lawmbass/Bootstrap-Portfolio<file_sep>/README.md
# Bootstrap-Portfolio
Responsiveness homework, adding initial files.
| 3336d9283f49bcfc515b1c5f32fe4b2e3028c1fb | [
"Markdown"
] | 1 | Markdown | lawmbass/Bootstrap-Portfolio | 5520eb38409c1f315cfd13c109b985315deebe2c | 3baaf0357e161cc341938763a276e890541a492f |
refs/heads/master | <file_sep>from flask import Blueprint,render_template
usuarios = Blueprint('usuarios', __name__)
usuarios.holder = 'blablabla'
def set_bla():
usuarios.holder = 'anti-bla'
usuarios.before_request(set_bla)
@usuarios.route("/usuarios/")
def index():
return render_template("usuarios/index.html", holder=usuarios.holder)<file_sep>from flask import Blueprint, render_template
from models import User, db
public = Blueprint('public', __name__)
@public.route("/")
def index():
total = User.query.count()
return render_template("public/index.html", total=total)<file_sep>from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80),unique=True)
email = db.Column(db.String(225), unique=True)
def __init__(self, name, email):
self.name = name
self.email = email<file_sep># -*- coding: utf-8 -*-
from flask import Flask
from public import public as public_blueprint
from usuarios import usuarios as usuarios_blueprint
from models import db
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:@localhost/flask'
db.init_app(app)
app.register_blueprint(public_blueprint)
app.register_blueprint(usuarios_blueprint)  # imported above but previously never registered
if __name__ == "__main__":
app.run(debug=True)
| 1be4928126d43e47415fabb46825893390a3b4c3 | [
"Python"
] | 4 | Python | supwr/sqlalchemy-migrate | 146b1b089139dc2b6cc458915aee81b01b4194bf | d0b3fd41103ed329e1bc143b0fef228ec22aedfe |
refs/heads/master | <file_sep>'use strict'
const pug = require('pug');
function FileGenerator() { //ES5 Constructor
this.renderFile = (templateFile, data) => {
const loadTemplate = pug.compileFile(templateFile);
console.log(loadTemplate(data));
}
function sayHello() {
//private method, not possible in es6
return 'hello';
}
}
module.exports = {
FileGenerator
}<file_sep>var dataSource = {
server: 'localhost\\SQLEXPRESS', //works
database: 'frontsimple',//works
user: 'LaborDb_app_user',
password: '<PASSWORD>',
port: 1433,
requestTimeout: 50000
}
module.exports = {
dataSource
}<file_sep>const sql = require('mssql')
function dbConnect(){
var config = {
server: 'localhost\\SQLEXPRESS', //works
database: 'frontsimple',//works
user: 'LaborDb_app_user',
password: '<PASSWORD>',
port: 1433
};
sql.connect(config).then(pool => {
// Query
return pool.request()
//.input('input_parameter', sql.Int, value)
.query('select top 100 * from dbo.H1B')
}).then(result => {
console.dir(result)
}).catch(err => {
console.log('error');
console.log(err);
})
sql.on('error', err => {
console.log('looks like global handler of error');
})
}
dbConnect();<file_sep>p #{name}'s #{how} Pug source code!<file_sep>
var lca = require('./src/h1b/LCA.js');
const FileGenerator = require('./src/filegen/Pug.FileGenerator.js').FileGenerator
function readDb2() {
//10 ms auto save set now. check if it works
lca.countNumberOfH1b('cap gemini')
.then(result => console.log(result))
.catch(err => console.error(err));
lca.getAggregateForEmployer('CAPGEMINI AMERICA INC')
.then(result => console.log(result))
.catch(err => console.error(err));
lca.getAggregateForEmployerByTitle('CAPGEMINI AMERICA INC')
.then(result => console.log(result))
.catch(err => console.error(err));
}
function generateFile() {
const fileGen = new FileGenerator();
fileGen.renderFile('src/h1b/markupTemplate.pug', {
name: 'Raven'
});
}
//readDb2();
generateFile();
| 94e70dec071682e166f324561a849d7d297b4a58 | [
"JavaScript",
"Pug"
] | 5 | JavaScript | gitforwork/LaborDb | 132c88f66fec0fb059069fbd7eaf1280972e3206 | b496b7b680351fdb283a0b5f94bc6048634f9fd6 |
refs/heads/master | <file_sep>package Server;
import java.io.Reader;
import java.io.IOException;
import java.io.EOFException;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import org.json.simple.*;
import org.json.simple.parser.*;
public class dictionary {
private String Path;
private JSONObject dictionary;
public dictionary(String Path) {
this.Path = Path;
this.readJSONFile();
}
public synchronized void readJSONFile() {
JSONParser parser = new JSONParser();
File file = new File(this.Path);
this.dictionary = new JSONObject();
try
{ if(!file.exists())
{
file.createNewFile();
}
Reader fileinput = new FileReader(file);
this.dictionary = (JSONObject) parser.parse(fileinput);
fileinput.close();
}
catch(FileNotFoundException e)
{
System.out.println("no file detected");
}
catch(IOException e)
{
System.out.println("reading error");
}
catch(ParseException e)
{
System.out.println("parsing error");
}
}
public synchronized void overwriteJSONFile() {
JSONParser parser = new JSONParser();
File file = new File(this.Path);
try
{
FileWriter filewriter = new FileWriter(file);
filewriter.write(this.dictionary.toJSONString());
filewriter.flush();
System.out.println("file overwritten");
filewriter.close();
}
catch(FileNotFoundException e)
{
System.out.println("no file detected,reopen the terminal and it will be alright");
}
catch(IOException e)
{
System.out.println("reading error");
}
}
public synchronized String addWord(String word, String meaning) {
String result = null;
readJSONFile();
if (this.dictionary.containsKey(word))
{
result = ("already exists\n");
}
else
{
this.dictionary.put(word, meaning);
result = "word added\n";
overwriteJSONFile();
}
return result;
}
public synchronized String query(String word) {
String result;
readJSONFile();
if (!this.dictionary.containsKey(word))
{
result ="no word in the dictionary\n";
}
else
{
result = (String) this.dictionary.get(word);
}
return result;
}
public synchronized String removeWord(String word) {
String result = null;
readJSONFile();
if(!this.dictionary.containsKey(word))
{
result = "no word in the dictionary\n";
}
else
{
this.dictionary.remove(word);
result = "completed\n";
overwriteJSONFile();
}
return result;
}
}
<file_sep>package clientterminal;
import java.awt.EventQueue;
import javax.swing.JFrame;
import javax.swing.BoxLayout;
import javax.swing.JLabel;
import javax.swing.JTextField;
import javax.swing.JButton;
import java.awt.event.ActionListener;
import java.awt.event.ActionEvent;
import java.awt.Font;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.awt.event.ContainerAdapter;
import java.awt.event.ContainerEvent;
public class client_terminal {
private JFrame frame;
private JTextField textField;
private JTextField textField_1;
private static String ip ="localhost";
private static int port = 5001;
private JTextField textField_2;
/**
* Launch the application.
*/
public static void main(String[] args) {
EventQueue.invokeLater(new Runnable() {
public void run() {
try {
if((Integer.parseInt(args[1]) < 9999 && Integer.parseInt(args[1]) >1024) && args[0] != null)
{
ip = args[0];
port = Integer.parseInt(args[1]);
}
client_terminal window = new client_terminal();
window.frame.setVisible(true);
} catch (Exception e) {
e.printStackTrace();
}
}
});
}
/**
* Create the application.
*/
public client_terminal() {
initialize();
}
/**
* Initialize the contents of the frame.
*/
private void initialize() {
frame = new JFrame();
frame.getContentPane().setFont(new Font("Tahoma", Font.PLAIN, 20));
frame.setBounds(100, 100, 1254, 510);
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
frame.getContentPane().setLayout(null);
textField = new JTextField("here to type in the word");
textField.setFont(new Font("Tahoma", Font.PLAIN, 20));
textField.setBounds(26, 28, 566, 84);
frame.getContentPane().add(textField);
textField.setColumns(10);
textField_1 = new JTextField("here to type in the meaning(only type in using add)");
textField_1.setFont(new Font("Tahoma", Font.PLAIN, 20));
textField_1.setBounds(26, 129, 566, 112);
frame.getContentPane().add(textField_1);
textField_1.setColumns(10);
JButton btnNewButton = new JButton("add");
btnNewButton.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent arg0) {
String word = textField.getText();
String meanings = textField_1.getText();
String result = "wrong parameters! type in something in the first area or second";
if (!(word.replaceAll("\\s+",",").equals(",") || word.equals("")))
if (!(meanings.replaceAll("\\s+",",").equals(",") || meanings.equals("")))
{
clientdoing newrequest = new clientdoing(ip,port);
result = newrequest.sendRequest(1,word.trim(),meanings.trim());
}
textField_2.setText(result);
}
});
btnNewButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
}
});
btnNewButton.setBounds(885, 44, 171, 41);
frame.getContentPane().add(btnNewButton);
JButton btnNewButton_1 = new JButton("remove");
btnNewButton_1.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent e) {
String word = textField.getText();
String meanings = textField_1.getText();
clientdoing newrequest = new clientdoing(ip,port);
String result = newrequest.sendRequest(2,word.trim(),meanings.trim());
textField_2.setText(result);
}
});
btnNewButton_1.setBounds(885, 164, 171, 41);
frame.getContentPane().add(btnNewButton_1);
JButton btnNewButton_2 = new JButton("search");
btnNewButton_2.addMouseListener(new MouseAdapter() {
@Override
public void mouseClicked(MouseEvent e) {
String word = textField.getText();
String meanings = textField_1.getText();
clientdoing newrequest = new clientdoing(ip,port);
String result = newrequest.sendRequest(3,word.trim(),meanings.trim());
textField_2.setText(result);
}
});
btnNewButton_2.setBounds(885, 305, 171, 41);
frame.getContentPane().add(btnNewButton_2);
textField_2 = new JTextField();
textField_2.setBounds(26, 269, 566, 104);
frame.getContentPane().add(textField_2);
textField_2.setColumns(10);
}
}
<file_sep>package Server;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
public class callingDictThread extends Thread{
private Socket connectingClient;
private dictionary dict;
private String Path;
private int clientnumber;
public callingDictThread(Socket socket,String Path,int number){
this.connectingClient= socket;
this.Path = Path;
this.clientnumber = number;
}
public Socket getconnectingClient() {
return connectingClient;
}
public void run() {
this.dict = new dictionary(this.Path);
try
{
DataInputStream input = new DataInputStream(connectingClient.getInputStream());
DataOutputStream output = new DataOutputStream(connectingClient.getOutputStream());
JSONObject commandReceived = new JSONObject();
JSONParser parser = new JSONParser();
String state = null;
String messagetoc = null;
String word = null;
commandReceived = (JSONObject) parser.parse(input.readUTF());
String action = getActionFromClient(commandReceived,clientnumber);
writetorecord(action);
System.out.println(action);
JSONObject commandToSend = new JSONObject();
if(commandReceived.containsKey("1"))
{
word = (String) commandReceived.get("1");
state = "Good!";
String Meaning = (String) commandReceived.get("meaning");
messagetoc = dict.addWord(word, Meaning);
}
else if(commandReceived.containsKey("2"))
{
word = (String) commandReceived.get("2");
state = "Good!";
messagetoc = dict.removeWord(word);
}
else if(commandReceived.containsKey("3"))
{
word= (String) commandReceived.get("3");
state = "Good!";
messagetoc = dict.query(word);
}
commandToSend.put("goodOrnot",state);
commandToSend.put("meaning",messagetoc);
output.writeUTF(commandToSend.toJSONString());
output.flush();
System.out.println("The No."+clientnumber+" is done.\n");
}
catch (UnknownHostException e)
{
e.printStackTrace();
}
catch (IOException e)
{
e.printStackTrace();
}
catch (ParseException e)
{
e.printStackTrace();
}
}
public String getActionFromClient(JSONObject commandReceived,int clientnumber)
{
String action = null;
try {
if(commandReceived.containsKey("1"))
{
String word = (String) commandReceived.get("1");
String meaning = (String) commandReceived.get("meaning");
action = ("No."+clientnumber+" tried to add: ("+word+") ("+meaning+")\n");
}
else if(commandReceived.containsKey("2"))
{
String word = (String) commandReceived.get("2");
action = ("No."+clientnumber+" tried to remove: ("+word+")\n");
}
else if(commandReceived.containsKey("3"))
{
String word = (String) commandReceived.get("3");
action = ("No."+clientnumber+" tried to search: ("+word+")\n");
}
else
throw new Exception ("error");
}
catch(Exception e)
{
action = "error happened";
}
return action;
}
private void writetorecord(String log) {
File file = new File("RequestActions.txt");
try {
if (!file.exists())
{
file.createNewFile();
}
BufferedWriter output = new BufferedWriter (new FileWriter(file,true));
output.write(log);
output.flush();
output.close();
}
catch(FileNotFoundException e)
{
System.out.println("no file detected");
}
catch(IOException e)
{
System.out.println("reading error");
}
}
}
<file_sep># dictionary_min
A multi-threaded dictionary system
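
A minimal Python client sketch for the wire protocol (assumed from `callingDictThread`: one JSON request per connection, keys `"1"`/`"2"`/`"3"` for add/remove/search plus `"meaning"`, framed with Java's `writeUTF`/`readUTF` 2-byte length prefix — this snippet is not part of the original repo):

```python
import json, socket, struct

def send_request(host, port, payload):
    data = json.dumps(payload).encode('utf-8')
    with socket.create_connection((host, port)) as s:
        s.sendall(struct.pack('>H', len(data)) + data)   # writeUTF framing
        (n,) = struct.unpack('>H', s.recv(2))            # readUTF framing
        return json.loads(s.recv(n).decode('utf-8'))

# e.g. send_request('localhost', 5001, {"1": "apple", "meaning": "a fruit"})
```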
| 9aa0e0744037bb45ff805801907f86ce96cf606a | [
"Java",
"Markdown"
] | 4 | Java | lupintheforth/dictionary_min | e422af07dba31d12a6a6d6f6c4c67a8c19518708 | 7f9f890f1e43512dd0b83d563b26260b8cdc7860 |
refs/heads/main | <repo_name>eleventh83/eleventh83.github.io<file_sep>/README.md
# eleventh83.github.io | 23ad853d57a37037b66a224f654c14dbda781f07 | [
"Markdown"
] | 1 | Markdown | eleventh83/eleventh83.github.io | 3e0c8824492d9452b6ec37f0397688fab21f8c30 | 5fded6e99dded412c9d017f41b22c170320db78e |
refs/heads/master | <repo_name>ShiAllen/test02<file_sep>/app/Http/routes.php
<?php
/*
|--------------------------------------------------------------------------
| Application Routes
|--------------------------------------------------------------------------
|
| Here is where you can register all of the routes for an application.
| It's a breeze. Simply tell Laravel the URIs it should respond to
| and give it the controller to call when that URI is requested.
|
*/
// Route::get('go2allen', function () {
// return redirect('allen');
// });
// Route::get('hello', function () {
// return "hello Laravel 2016 08 11" ;
// });
// Route::get('allen', function () {
// return view('allen');
// });
// Route::get('goto/{name}', function ($name) {
// return view($name);
// });
// Route::get('goto/{name?}', function ($name='welcome') {
// return view($name);
// });
// Route::get('{name?}', function ($name='welcome') {
// return view($name);
// });
Route::get('id/{no?}', function ($no=0) {
return "id:" . $no;
})->where('no' ,'[0-9]+' );
// // view <a href={{urlname}} >
// Route::get('aliasname/{name?}',[ 'as'=>'urlname', function ($name=0) {
// return "id:" . $no;
// }])->where('no' ,'[0-9]+' );
// // Route group
// ->where('id', '[0-9]+'); // constrain the parameter to digits
/*
*
Route::get('group/{name?}',[ 'as'=>'urlname', function ($name=0) {
return "id:" . $no;
}])->where('no' ,'[0-9]+' );
* */
Route::get('/',[ 'as'=>'home.index', 'uses' =>'HomeController@index' ] );
Route::get('about',[ 'as'=>'about.index', 'uses' =>'AboutController@index' ] );
Route::get('news',[ 'as'=>'news.index', 'uses' =>'NewsController@index' ] );
Route::get('/allen', function () {
return view('allen');
});
// view welcome
<file_sep>/dirall.bat
dir /s/b >allfile.txt | e4d635b10a6e2afafc01034fa89029f80bf51f4d | [
"Batchfile",
"PHP"
] | 2 | Batchfile | ShiAllen/test02 | 9aad63c9807c6e3e23707bb0be7e5069b0193fbd | ecb7c1c8124f6792285c44fe9fe5041ffa738471 |
refs/heads/master | <repo_name>hedgehog-zowie/hedgehog-zowie.github.io<file_sep>/2016/04/19/java-core-thread/index.html
<!DOCTYPE html><html lang="zh-CN"><head><meta http-equiv="content-type" content="text/html; charset=utf-8"><meta content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0" name="viewport"><meta content="yes" name="apple-mobile-web-app-capable"><meta content="black-translucent" name="apple-mobile-web-app-status-bar-style"><meta content="telephone=no" name="format-detection"><meta name="description"><title>多线程总结 | 破而后立</title><link rel="stylesheet" type="text/css" href="/css/style.css?v=0.0.0"><link rel="stylesheet" type="text/css" href="//cdn.bootcss.com/normalize/3.0.3/normalize.min.css"><link rel="stylesheet" type="text/css" href="//cdn.bootcss.com/pure/0.6.0/pure-min.css"><link rel="stylesheet" type="text/css" href="//cdn.bootcss.com/pure/0.6.0/grids-responsive-min.css"><link rel="stylesheet" href="//cdn.bootcss.com/font-awesome/4.5.0/css/font-awesome.min.css"><script type="text/javascript" src="//cdn.bootcss.com/jquery/2.2.1/jquery.min.js"></script><link rel="Shortcut Icon" type="image/x-icon" href="/favicon.ico"><link rel="apple-touch-icon" href="/apple-touch-icon.png"><link rel="apple-touch-icon-precomposed" href="/apple-touch-icon.png"><link rel="alternate" type="application/atom+xml" href="/atom.xml"></head><body><div class="body_container"><div id="header"><div class="site-name"><h1 class="hidden">多线程总结</h1><a id="logo" href="/.">破而后立</a><p class="description">凡事破必有一立</p></div><div id="nav-menu"><a href="/." class="current"><i class="fa fa-home"> 首页</i></a><a href="/archives/"><i class="fa fa-archive"> 归档</i></a><a href="/about/"><i class="fa fa-user"> 关于</i></a><a href="/history/"><i class="fa fa-book"> 历史</i></a><a href="/guestbook/"><i class="fa fa-comments"> 留言</i></a><a href="/atom.xml"><i class="fa fa-rss"> 订阅</i></a></div></div><div id="layout" class="pure-g"><div class="pure-u-1 pure-u-md-3-4"><div class="content_container"><div class="post"><h1 class="post-title">多线程总结</h1><div class="post-meta">Apr 19, 2016<span> | </span><span class="category"><a href="/categories/Java基础/">Java基础</a></span><script src="https://dn-lbstatics.qbox.me/busuanzi/2.3/busuanzi.pure.mini.js" async></script><span id="busuanzi_container_page_pv"> | <span id="busuanzi_value_page_pv"></span><span> Hits</span></span></div><a data-disqus-identifier="2016/04/19/java-core-thread/" href="/2016/04/19/java-core-thread/#disqus_thread" class="disqus-comment-count"></a><div class="clear"><div id="toc" class="toc-article"><div class="toc-title">文章目录</div><ol class="toc"><li class="toc-item toc-level-1"><a class="toc-link" href="#线程的状态"><span class="toc-number">1.</span> <span class="toc-text">线程的状态</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#NEW"><span class="toc-number">1.1.</span> <span class="toc-text">NEW</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#RUNNABLE"><span class="toc-number">1.2.</span> <span class="toc-text">RUNNABLE</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#BLOCKED"><span class="toc-number">1.3.</span> <span class="toc-text">BLOCKED</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#WAITING"><span class="toc-number">1.4.</span> <span class="toc-text">WAITING</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#TIMED-WAITING"><span class="toc-number">1.5.</span> <span class="toc-text">TIMED_WAITING</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#TERMINATED"><span 
class="toc-number">1.6.</span> <span class="toc-text">TERMINATED</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#线程属性"><span class="toc-number">2.</span> <span class="toc-text">线程属性</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#线程优先级"><span class="toc-number">2.1.</span> <span class="toc-text">线程优先级</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#守护线程"><span class="toc-number">2.2.</span> <span class="toc-text">守护线程</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#未捕获异常处理器"><span class="toc-number">2.3.</span> <span class="toc-text">未捕获异常处理器</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#线程的创建"><span class="toc-number">3.</span> <span class="toc-text">线程的创建</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#继承Thread类"><span class="toc-number">3.1.</span> <span class="toc-text">继承Thread类</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#实现Runnable接口"><span class="toc-number">3.2.</span> <span class="toc-text">实现Runnable接口</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#实现Callable接口"><span class="toc-number">3.3.</span> <span class="toc-text">实现Callable接口</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#三种方式的比较"><span class="toc-number">3.4.</span> <span class="toc-text">三种方式的比较</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#线程同步"><span class="toc-number">4.</span> <span class="toc-text">线程同步</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#synchronized关键字"><span class="toc-number">4.1.</span> <span class="toc-text">synchronized关键字</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#Lock"><span class="toc-number">4.2.</span> <span class="toc-text">Lock</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#方法摘要"><span class="toc-number">4.2.1.</span> <span class="toc-text">方法摘要</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#ReentrantLock"><span class="toc-number">4.2.2.</span> <span class="toc-text">ReentrantLock</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#ReadWriteLock"><span class="toc-number">4.3.</span> <span class="toc-text">ReadWriteLock</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#方法摘要-1"><span class="toc-number">4.3.1.</span> <span class="toc-text">方法摘要</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#ReentrantReadWriteLock"><span class="toc-number">4.3.2.</span> <span class="toc-text">ReentrantReadWriteLock</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#Condition"><span class="toc-number">4.3.3.</span> <span class="toc-text">Condition</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#volatile"><span class="toc-number">4.4.</span> <span class="toc-text">volatile</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#死锁"><span class="toc-number">4.5.</span> <span class="toc-text">死锁</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#线程池"><span class="toc-number">5.</span> <span class="toc-text">线程池</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#线程池原理"><span 
class="toc-number">5.1.</span> <span class="toc-text">线程池原理</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#简单线程池的实现"><span class="toc-number">5.2.</span> <span class="toc-text">简单线程池的实现</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#concurrent包"><span class="toc-number">6.</span> <span class="toc-text">concurrent包</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#执行器"><span class="toc-number">6.1.</span> <span class="toc-text">执行器</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#队列"><span class="toc-number">6.2.</span> <span class="toc-text">队列</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#计时"><span class="toc-number">6.3.</span> <span class="toc-text">计时</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#同步器"><span class="toc-number">6.4.</span> <span class="toc-text">同步器</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#并发-Collection"><span class="toc-number">6.5.</span> <span class="toc-text">并发 Collection</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#内存一致性属性"><span class="toc-number">6.6.</span> <span class="toc-text">内存一致性属性</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#关于线程个数"><span class="toc-number">7.</span> <span class="toc-text">关于线程个数</span></a></li></ol></div></div><div class="post-content"><p>线程是程序执行流的最小单元,多线程程序在较低层次上扩展了多任务的概念:一个程序同时执行多个任务。通常每一个任务称为一个线程(thread),它是线程控制的简称。可以同时运行一个以上线程的程序称为多线程程序(multithreaded)。</p>
<h1 id="线程的状态"><a href="#线程的状态" class="headerlink" title="线程的状态"></a>线程的状态</h1><p>Java的线程如下有6种状态:<br>NEW(新建)<br>RUNNABLE(就绪,可运行)<br>BLOCKED(被阻塞)<br>WAITING(等待)<br>TIMED_WAITING(计时等待)<br>TERMINATED(被终止)</p>
<p>可以使用Thread.getState()方法获取线程的当前状态。在Thread类中线程状态由一个整型变量threadStatus表示,getState()方法调用VM类的toThreadState()方法,根据threadStatus某个位置上是否为1来判断线程状态,代码如下:</p>
<figure class="highlight erlang-repl"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">(var0 & <span class="number">4</span>) != <span class="number">0</span>?State.RUNNABLE:((var0 & <span class="number">1024</span>) != <span class="number">0</span>?State.BLOCKED:((var0 & <span class="number">16</span>) != <span class="number">0</span>?State.WAITING:((var0 & <span class="number">32</span>) != <span class="number">0</span>?State.TIMED_WAITING:((var0 & <span class="number">2</span>) != <span class="number">0</span>?State.TERMINATED:((var0 & <span class="number">1</span>) == <span class="number">0</span>?State.NEW:State.RUNNABLE)))));</span><br></pre></td></tr></table></figure>
<h2 id="NEW"><a href="#NEW" class="headerlink" title="NEW"></a>NEW</h2><p>当用new操作符创建一个新线程时,如new Thread(),该线程还没有运行,此时线程状态是NEW。</p>
<h2 id="RUNNABLE"><a href="#RUNNABLE" class="headerlink" title="RUNNABLE"></a>RUNNABLE</h2><p>一旦调用start()方法,线程处于RUNNABLE状态。<br><code>一个RUNNABLE状态的线程可能正在运行也可能没有运行,这取决于操作系统给线程提供运行的时间</code>。</p>
<h2 id="BLOCKED"><a href="#BLOCKED" class="headerlink" title="BLOCKED"></a>BLOCKED</h2><p>当一个线程试图获取一个内部对象锁,而该锁被其他线程持有,则该线程进入BLOCKED状态。</p>
<h2 id="WAITING"><a href="#WAITING" class="headerlink" title="WAITING"></a>WAITING</h2><p>当一个线程等待另一个线程通知调度器一个条件时,进入WAITING状态,如调用:Object.wait(), Thread.join(), Lock.lock(), Condition.await()时。</p>
<h2 id="TIMED-WAITING"><a href="#TIMED-WAITING" class="headerlink" title="TIMED_WAITING"></a>TIMED_WAITING</h2><p>有几个方法有超时参数,调用它们将导致线程进入TIMED_WAITING状态,如调用:Thread.sleep(long millis), Object.wait(long timeout), Lock.tryLock(), Condition.await(long time, TimeUnit unit)时。</p>
<h2 id="TERMINATED"><a href="#TERMINATED" class="headerlink" title="TERMINATED"></a>TERMINATED</h2><ul>
<li>因为run方法正常退出而自然终止。</li>
<li>因为一个没有捕获的异常而终止。</li>
</ul>
<p><code>stop()、suspend()、resume()已过时,不要使用。</code></p>
<h1 id="线程属性"><a href="#线程属性" class="headerlink" title="线程属性"></a>线程属性</h1><p>线程属性包括:线程优先级、守护线程、线程组以及处理未捕获异常的处理器。</p>
<h2 id="线程优先级"><a href="#线程优先级" class="headerlink" title="线程优先级"></a>线程优先级</h2><p>在Java中每一个线程都有一个优先级,使用setPriority()方法可以设置线程的优先级,可以将优先级置为在MIN_PRIORITY(在Thread类中定义为0)和MAX_PRIORITY(在Thread类中定义为10)之间的任何值。NORM_PRIORITY被定义为5。</p>
<p><code>注意:线程优先级高度依赖于系统,Windows有7个优先级,Sun为Linux提供的Java虚拟机,线程的优先级被忽略——所有的线程具有相同的优先级。</code></p>
<h2 id="守护线程"><a href="#守护线程" class="headerlink" title="守护线程"></a>守护线程</h2><p>通过调用方法setDaemon(true)来将线程设置为守护线程,守护线程的唯一作用是为其他线程提供服务。当只剩下守护线程时,虚拟机就退出了,所以守护线程中永远不应该去访问固有资源,如文件、数据库,因为它会在任何时候甚至在一个操作的中间发生中断。</p>
<h2 id="未捕获异常处理器"><a href="#未捕获异常处理器" class="headerlink" title="未捕获异常处理器"></a>未捕获异常处理器</h2><p>该处理器必须实现Thead.UncaughtExceptionHandler接口,该接口只有一个方法:void uncaughtException(Thread t, Throwable e)。<br>使用setUncaughtExceptionHandler()方法为线程安装一个处理器,也可以使用静态方法Thread.setUncaughtExceptionHandler()为所有线程安装一个默认的处理器。</p>
<h1 id="线程的创建"><a href="#线程的创建" class="headerlink" title="线程的创建"></a>线程的创建</h1><p>创建线程有三种方法:</p>
<ul>
<li>继承Thread类</li>
<li>实现Runnable接口</li>
<li>实现Callable接口</li>
</ul>
<h2 id="继承Thread类"><a href="#继承Thread类" class="headerlink" title="继承Thread类"></a>继承Thread类</h2><p>步骤如下</p>
<ol>
<li>继承Thread类,重写run方法</li>
<li>创建线程对象</li>
<li>执行start方法</li>
</ol>
<figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">ExtendsThread</span> <span class="keyword">extends</span> <span class="title">Thread</span></span>{</span><br><span class="line"> <span class="meta">@Override</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < <span class="number">10</span>; i++) {</span><br><span class="line"> System.out.println(getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Thread.sleep(<span class="number">10</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span><span class="params">(String args[])</span> </span>{</span><br><span class="line"> ExtendsThread subThread1 = <span class="keyword">new</span> ExtendsThread();</span><br><span class="line"> subThread1.setName(<span class="string">"subThread1"</span>);</span><br><span class="line"> ExtendsThread subThread2 = <span class="keyword">new</span> ExtendsThread();</span><br><span class="line"> subThread2.setName(<span class="string">"subThread2"</span>);</span><br><span class="line"> subThread1.start();</span><br><span class="line"> subThread2.start();</span><br><span class="line"> }</span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>输出结果(不固定):</p>
<figure class="highlight armasm"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span></span><br></pre></td></tr></table></figure>
<p><code>注意:不要调用Thread类或Runnable对象的run方法,直接调用run方法,只会执行同一个线程中的任务,而不会启动新线程。应该调用Thread.start方法,这个方法将创建一个执行run方法的新线程。</code></p>
<h2 id="实现Runnable接口"><a href="#实现Runnable接口" class="headerlink" title="实现Runnable接口"></a>实现Runnable接口</h2><ol>
<li>创建Runnable的实现类,重写run方法</li>
<li>创建该Runnable实现类的对象,并以该对象为参数创建Thread实例</li>
<li>执行start方法</li>
</ol>
<figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">ImplRunnable</span> </span>{</span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span><span class="params">(String args[])</span></span>{</span><br><span class="line"> MyRunnable myRunnable = <span class="keyword">new</span> MyRunnable();</span><br><span class="line"> Thread subThread1 = <span class="keyword">new</span> Thread(myRunnable, <span class="string">"subThread1"</span>);</span><br><span class="line"> Thread subThread2 = <span class="keyword">new</span> Thread(myRunnable, <span class="string">"subThread2"</span>);</span><br><span class="line"> subThread1.start();</span><br><span class="line"> subThread2.start();</span><br><span class="line"> }</span><br><span class="line"> <span class="keyword">static</span> <span class="class"><span class="keyword">class</span> <span class="title">MyRunnable</span> <span class="keyword">implements</span> <span class="title">Runnable</span></span>{</span><br><span class="line"> <span class="meta">@Override</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < <span class="number">10</span>; i++) {</span><br><span class="line"> System.out.println(Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Thread.sleep(<span class="number">10</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>输出结果(不固定):</p>
<figure class="highlight armasm"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span><br><span class="line"></span><span class="keyword">subThread2</span><br><span class="line"></span><span class="keyword">subThread1</span></span><br></pre></td></tr></table></figure>
<h2 id="实现Callable接口"><a href="#实现Callable接口" class="headerlink" title="实现Callable接口"></a>实现Callable接口</h2><ol>
<li>创建Callable的实现类,重写call()方法</li>
<li>创建该Callable实现类的对象,并以该对象为参数创建FutureTask对象</li>
<li>以该FutureTask对象为参数,创建Thread实例</li>
<li>执行start方法,并可调用FutureTask对象的方法获取线程执行的状态及返回结果。</li>
</ol>
<figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">ImplCallable</span> </span>{</span><br><span class="line"> </span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span><span class="params">(String[] args)</span> <span class="keyword">throws</span> ExecutionException, InterruptedException </span>{</span><br><span class="line"> MyCallable myCallable1 = <span class="keyword">new</span> MyCallable(<span class="number">10</span>);</span><br><span class="line"> MyCallable myCallable2 = <span class="keyword">new</span> MyCallable(<span class="number">20</span>);</span><br><span class="line"> FutureTask<Integer> futureTask1 = <span class="keyword">new</span> FutureTask(myCallable1);</span><br><span class="line"> FutureTask<Integer> futureTask2 = <span class="keyword">new</span> FutureTask(myCallable2);</span><br><span class="line"> Thread subThread1 = <span class="keyword">new</span> Thread(futureTask1);</span><br><span class="line"> Thread subThread2 = <span class="keyword">new</span> Thread(futureTask2);</span><br><span class="line"> subThread1.start();</span><br><span class="line"> subThread2.start();</span><br><span class="line"> System.out.println(futureTask1.get());</span><br><span class="line"> System.out.println(futureTask2.get());</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="class"><span class="keyword">class</span> <span class="title">MyCallable</span> <span class="keyword">implements</span> <span class="title">Callable</span><<span class="title">Integer</span>></span>{</span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">int</span> num;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span 
class="keyword">public</span> <span class="title">MyCallable</span><span class="params">(<span class="keyword">int</span> num)</span> </span>{</span><br><span class="line"> <span class="keyword">this</span>.num = num;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">int</span> <span class="title">getNum</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">return</span> num;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">setNum</span><span class="params">(<span class="keyword">int</span> num)</span> </span>{</span><br><span class="line"> <span class="keyword">this</span>.num = num;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="meta">@Override</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> Integer <span class="title">call</span><span class="params">()</span> <span class="keyword">throws</span> Exception </span>{</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < num; i++) {</span><br><span class="line"> System.out.println(Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Thread.sleep(<span class="number">10</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> <span class="keyword">return</span> num;</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> </span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>Sample output (non-deterministic):</p>
<figure class="highlight lasso"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-8</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="number">10</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span 
class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="keyword">Thread</span><span class="number">-9</span></span><br><span class="line"><span class="number">20</span></span><br></pre></td></tr></table></figure>
<h2 id="三种方式的比较"><a href="#三种方式的比较" class="headerlink" title="三种方式的比较"></a>三种方式的比较</h2><ol>
<li>Java has no multiple inheritance, so a class that creates a thread by extending Thread cannot extend anything else. Creating threads through the Runnable or Callable interface leaves the class free to extend another class, and multiple threads can share a single target object (as sketched after this list), which makes this approach well suited to having several identical threads process the same resource;</li>
<li>The Callable interface makes it possible to obtain a return value from the thread.</li>
</ol>
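<p>To make point 1 concrete, here is a minimal sketch (the class name SharedTargetDemo and its contents are invented for illustration, not taken from the examples above): two threads are constructed around the <em>same</em> Runnable target, so they update one shared object.</p>
<figure class="highlight java"><pre><code>import java.util.concurrent.atomic.AtomicInteger;

public class SharedTargetDemo {

    // One Runnable instance holds the shared state.
    static class Counter implements Runnable {
        final AtomicInteger count = new AtomicInteger();

        @Override
        public void run() {
            for (int i = 0; i &lt; 1000; i++) {
                count.incrementAndGet();   // thread-safe increment
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Counter target = new Counter();
        // Both threads wrap the SAME target object.
        Thread t1 = new Thread(target);
        Thread t2 = new Thread(target);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println(target.count.get()); // 2000: both threads updated one object
    }
}
</code></pre></figure>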
<h1 id="线程同步"><a href="#线程同步" class="headerlink" title="线程同步"></a>线程同步</h1><h2 id="synchronized关键字"><a href="#synchronized关键字" class="headerlink" title="synchronized关键字"></a>synchronized关键字</h2><p>从1.0版本开始,Java中的每一个对象都有一个内部锁,如果一个方法使用synchronized关键字声明,线程将获得对象的内部锁。<br>synchronized有两种方式:锁方法、锁对象。</p>
<ol>
<li>Locking a method, i.e. declaring the method with the synchronized keyword:</li>
</ol>
<figure class="highlight oxygene"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// 获得的是对象的内部锁</span></span><br><span class="line"><span class="keyword">public</span> synchronized void <span class="function"><span class="keyword">method</span><span class="params">()</span><span class="comment">{</span><br><span class="line"> method body</span><br><span class="line">}</span></span></span><br></pre></td></tr></table></figure>
<figure class="highlight d"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">// 获得的是类锁,由于一个class不论被实例化多少次,其中的静态方法和静态变量在内存中都只由一份。所以,一旦一个静态的方法被申明为synchronized。此类所有的实例化对象在调用此方法,共用同一把锁。</span></span><br><span class="line"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">synchronized</span> <span class="keyword">void</span>(){</span><br><span class="line"> method <span class="keyword">body</span></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<ol start="2">
<li>Locking an object (a synchronized block):</li>
</ol>
<figure class="highlight oxygene"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> void <span class="function"><span class="keyword">method</span><span class="params">()</span><span class="comment">{</span><br><span class="line"> // 获得的是对象的内部锁</span><br><span class="line"> synchronized(this){</span><br><span class="line"> code block</span><br><span class="line"> }</span> </span><br><span class="line">}</span></span><br></pre></td></tr></table></figure>
<figure class="highlight oxygene"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">Object obj = <span class="keyword">new</span> Object();</span><br><span class="line"><span class="keyword">public</span> void <span class="function"><span class="keyword">method</span><span class="params">()</span><span class="comment">{</span><br><span class="line"> // 获得的是obj的内部锁</span><br><span class="line"> synchronized(obj){</span><br><span class="line"> code block</span><br><span class="line"> }</span></span><br><span class="line">}</span></span><br></pre></td></tr></table></figure>
<figure class="highlight oxygene"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> void <span class="function"><span class="keyword">method</span><span class="params">()</span><span class="comment">{</span><br><span class="line"> // 获得的是类锁</span><br><span class="line"> synchronized(xxx.class){</span><br><span class="line"> code block</span><br><span class="line"> }</span> </span><br><span class="line">}</span></span><br></pre></td></tr></table></figure>
<figure class="highlight oxygene"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> void <span class="function"><span class="keyword">method</span><span class="params">()</span><span class="comment">{</span><br><span class="line"> // 获得的是类锁</span><br><span class="line"> synchronized(Class.forName("xxx")){</span><br><span class="line"> code block</span><br><span class="line"> }</span> </span><br><span class="line">}</span></span><br></pre></td></tr></table></figure>
<h2 id="Lock"><a href="#Lock" class="headerlink" title="Lock"></a>Lock</h2><p>Lock(锁对象)允许把锁的实现作为Java类,而不是作为语言的特性来实现,这就为Lock的多种实现留下了空间,各种实现可能有不同的调度算法、性能特性或者锁定语义。</p>
<h3 id="方法摘要"><a href="#方法摘要" class="headerlink" title="方法摘要"></a>方法摘要</h3><table>
<thead>
<tr>
<th>Return type</th>
<th>Method</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>void</td>
<td>lock()</td>
<td>Acquires the lock.</td>
</tr>
<tr>
<td>void</td>
<td>lockInterruptibly()</td>
<td>Acquires the lock unless the current thread is interrupted.</td>
</tr>
<tr>
<td>Condition</td>
<td>newCondition()</td>
<td>Returns a new Condition instance bound to this Lock instance.</td>
</tr>
<tr>
<td>boolean</td>
<td>tryLock()</td>
<td>Acquires the lock only if it is free at the time of invocation.</td>
</tr>
<tr>
<td>boolean</td>
<td>tryLock(long time, TimeUnit unit)</td>
<td>Acquires the lock if it becomes free within the given waiting time and the current thread has not been interrupted.</td>
</tr>
<tr>
<td>void</td>
<td>unlock()</td>
<td>Releases the lock.</td>
</tr>
</tbody>
</table>
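<p>To illustrate how tryLock differs from lock, here is a minimal sketch, not taken from the original examples (the class name TryLockDemo is invented): the thread waits a bounded time for the lock instead of blocking indefinitely.</p>
<figure class="highlight java"><pre><code>import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class TryLockDemo {

    private final Lock lock = new ReentrantLock();

    public void doWork() throws InterruptedException {
        // Wait at most one second for the lock instead of blocking forever.
        if (lock.tryLock(1, TimeUnit.SECONDS)) {
            try {
                System.out.println("got the lock, doing work");
            } finally {
                lock.unlock();   // always release in a finally block
            }
        } else {
            System.out.println("could not get the lock within 1s, giving up");
        }
    }
}
</code></pre></figure>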
<h3 id="ReentrantLock"><a href="#ReentrantLock" class="headerlink" title="ReentrantLock"></a>ReentrantLock</h3><p>ReentrantLock类是Lock的一个实现,使用ReentrantLock的代码块结构如下:</p>
<figure class="highlight less"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line">myLock<span class="selector-class">.lock</span>();</span><br><span class="line"><span class="selector-tag">try</span>{</span><br><span class="line"> ......</span><br><span class="line">} <span class="selector-tag">finally</span> {</span><br><span class="line"> myLock<span class="selector-class">.unlock</span>();</span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p><code>ReentrantLock(true) constructs a lock with a fairness policy. A fair lock may sound more reasonable, but it is considerably slower than a regular (non-fair) lock.</code></p>
<h2 id="ReadWriteLock"><a href="#ReadWriteLock" class="headerlink" title="ReadWriteLock"></a>ReadWriteLock</h2><p>ReadWriteLock 维护了一对相关的锁,一个用于只读操作,另一个用于写入操作。只要没有 writer,读取锁可以由多个 reader 线程同时保持。写入锁是独占的。<br>与互斥锁相比,读-写锁允许对共享数据进行更高级别的并发访问。虽然一次只有一个线程(writer 线程)可以修改共享数据,但在许多情况下,任何数量的线程可以同时读取共享数据(reader 线程),读-写锁利用了这一点。从理论上讲,与互斥锁相比,使用读-写锁所允许的并发性增强将带来更大的性能提高。在实践中,只有在多处理器上并且只在访问模式适用于共享数据时,才能完全实现并发性增强。</p>
<h3 id="方法摘要-1"><a href="#方法摘要-1" class="headerlink" title="方法摘要"></a>方法摘要</h3><table>
<thead>
<tr>
<th>Return type</th>
<th>Method</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>Lock</td>
<td>readLock()</td>
<td>Returns the lock used for reading.</td>
</tr>
<tr>
<td>Lock</td>
<td>writeLock()</td>
<td>Returns the lock used for writing.</td>
</tr>
</tbody>
</table>
<h3 id="ReentrantReadWriteLock"><a href="#ReentrantReadWriteLock" class="headerlink" title="ReentrantReadWriteLock"></a>ReentrantReadWriteLock</h3><p>ReentrantReadWriteLock是ReadWriteLock的实现类,其使用方式如下:</p>
<figure class="highlight cs"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line">ReentrantReadWriteLock <span class="keyword">lock</span> = <span class="keyword">new</span> ReentrantReadWriteLock();</span><br><span class="line"><span class="keyword">lock</span>.readLock().<span class="keyword">lock</span>(); <span class="comment">// 获得读锁</span></span><br><span class="line"><span class="keyword">lock</span>.readLock().unLock(); <span class="comment">// 释放读锁</span></span><br><span class="line"><span class="keyword">lock</span>.writeLock().<span class="keyword">lock</span>(); <span class="comment">// 获得写锁</span></span><br><span class="line"><span class="keyword">lock</span>.writeLock().unLock(); <span class="comment">// 释放写锁</span></span><br></pre></td></tr></table></figure>
<p>Now for a practical example that uses a read-write lock to implement balance inquiry, deposit, and withdrawal:<br>Example:</p>
<figure class="highlight cs"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br><span class="line">86</span><br><span class="line">87</span><br><span class="line">88</span><br><span class="line">89</span><br><span class="line">90</span><br><span class="line">91</span><br><span class="line">92</span><br><span class="line">93</span><br><span class="line">94</span><br><span class="line">95</span><br><span class="line">96</span><br><span class="line">97</span><br><span class="line">98</span><br><span class="line">99</span><br><span class="line">100</span><br><span class="line">101</span><br><span class="line">102</span><br><span class="line">103</span><br><span class="line">104</span><br><span class="line">105</span><br><span class="line">106</span><br><span class="line">107</span><br><span class="line">108</span><br><span 
class="line">109</span><br><span class="line">110</span><br><span class="line">111</span><br><span class="line">112</span><br><span class="line">113</span><br><span class="line">114</span><br><span class="line">115</span><br><span class="line">116</span><br><span class="line">117</span><br><span class="line">118</span><br><span class="line">119</span><br><span class="line">120</span><br><span class="line">121</span><br><span class="line">122</span><br><span class="line">123</span><br><span class="line">124</span><br><span class="line">125</span><br><span class="line">126</span><br><span class="line">127</span><br><span class="line">128</span><br><span class="line">129</span><br><span class="line">130</span><br><span class="line">131</span><br><span class="line">132</span><br><span class="line">133</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title">ReadWriteLockStudy</span> {</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">Account</span> {</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">long</span> balance;</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span> ReadWriteLock <span class="keyword">lock</span> = <span class="keyword">new</span> ReentrantReadWriteLock();</span><br><span class="line"> <span class="keyword">private</span> Lock readLock = <span class="keyword">lock</span>.readLock();</span><br><span class="line"> <span class="keyword">private</span> Lock writeLock = <span class="keyword">lock</span>.writeLock();</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">show</span>(<span class="params"></span>) </span>{</span><br><span class="line"> readLock.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得readLock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"显示余额:"</span> + balance);</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> readLock.unlock();</span><br><span class="line"> }</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放readLock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">put</span>(<span class="params"><span class="keyword">long</span> money</span>) </span>{</span><br><span class="line"> writeLock.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得writeLock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> balance += money;</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"存入:"</span> + money + <span class="string">",余额:"</span> + 
balance);</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> writeLock.unlock();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放writeLock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">take</span>(<span class="params"><span class="keyword">long</span> money</span>) </span>{</span><br><span class="line"> writeLock.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得writeLock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> <span class="keyword">if</span> (balance < money)</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"余额不足, 需取: "</span> + money + <span class="string">"; 余额: "</span> + balance);</span><br><span class="line"> <span class="keyword">else</span> {</span><br><span class="line"> balance -= money;</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"取出:"</span> + money + <span class="string">",余额:"</span> + balance);</span><br><span class="line"> }</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> writeLock.unlock();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放writeLock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">PutThread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">PutThread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> Random random = <span class="keyword">new</span> Random();</span><br><span class="line"> <span class="keyword">int</span> i = <span class="number">0</span>;</span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>) {</span><br><span class="line"> <span class="keyword">int</span> money = random.nextInt(<span class="number">100</span>);</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> account.put(money);</span><br><span class="line"> sleep(<span class="number">1000</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> 
}</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">TakeThread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">TakeThread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> Random random = <span class="keyword">new</span> Random();</span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>) {</span><br><span class="line"> <span class="keyword">int</span> money = random.nextInt(<span class="number">100</span>);</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> account.take(money);</span><br><span class="line"> sleep(<span class="number">1000</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">ShowThread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">ShowThread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>) {</span><br><span class="line"> account.show();</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> sleep(<span class="number">10</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span>(<span class="params">String args[]</span>) </span>{</span><br><span class="line"> 
Account account = <span class="keyword">new</span> Account();</span><br><span class="line"> PutThread putThread = <span class="keyword">new</span> PutThread(account);</span><br><span class="line"> putThread.setName(<span class="string">"putThread"</span>);</span><br><span class="line"> TakeThread takeThread = <span class="keyword">new</span> TakeThread(account);</span><br><span class="line"> takeThread.setName(<span class="string">"takeThread"</span>);</span><br><span class="line"> ShowThread showThread1 = <span class="keyword">new</span> ShowThread(account);</span><br><span class="line"> showThread1.setName(<span class="string">"showThread1"</span>);</span><br><span class="line"> ShowThread showThread2 = <span class="keyword">new</span> ShowThread(account);</span><br><span class="line"> showThread2.setName(<span class="string">"showThread2"</span>);</span><br><span class="line"></span><br><span class="line"> putThread.start();</span><br><span class="line"> takeThread.start();</span><br><span class="line"> showThread1.start();</span><br><span class="line"> showThread2.start();</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>Sample output (non-deterministic):</p>
<figure class="highlight applescript"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br></pre></td><td class="code"><pre><span class="line">获得writeLock <span class="comment">-- putThread</span></span><br><span class="line">存入:<span class="number">46</span>,余额:<span class="number">46</span></span><br><span class="line">释放writeLock <span class="comment">-- putThread</span></span><br><span class="line">获得writeLock <span class="comment">-- takeThread</span></span><br><span class="line">余额不足, 需取: <span class="number">95</span>; 余额: <span class="number">46</span></span><br><span class="line">释放writeLock <span class="comment">-- takeThread</span></span><br><span class="line">获得readLock <span class="comment">-- showThread1</span></span><br><span class="line">显示余额:<span class="number">46</span></span><br><span class="line">释放readLock <span class="comment">-- showThread1</span></span><br><span class="line">获得readLock <span class="comment">-- showThread2</span></span><br><span class="line">显示余额:<span class="number">46</span></span><br><span class="line">释放readLock <span class="comment">-- showThread2</span></span><br><span class="line">...</span><br><span class="line">...</span><br><span class="line">...</span><br><span class="line">获得readLock <span class="comment">-- showThread2</span></span><br><span class="line">获得readLock <span class="comment">-- showThread1</span></span><br><span class="line">显示余额:<span class="number">46</span></span><br><span class="line">显示余额:<span class="number">46</span></span><br><span class="line">释放readLock <span class="comment">-- showThread1</span></span><br><span class="line">释放readLock <span class="comment">-- showThread2</span></span><br></pre></td></tr></table></figure>
<p>As the output shows, read-read access is not mutually exclusive, while read-write and write-write access are.</p>
<h3 id="Condition"><a href="#Condition" class="headerlink" title="Condition"></a>Condition</h3><p>条件(也称为条件队列 或条件变量)为线程提供了一个含义,以便在某个状态条件现在可能为 true 的另一个线程通知它之前,一直挂起该线程(即让其“等待”)。因为访问此共享状态信息发生在不同的线程中,所以它必须受保护,因此要将某种形式的锁与该条件相关联。等待提供一个条件的主要属性是:以原子方式 释放相关的锁,并挂起当前线程,就像 Object.wait 做的那样。<br>Condition可以替代传统的线程间通信,用await()替换wait(),用signal()替换notify(),用signalAll()替换notifyAll()。<br><code>不直接使用wait(), notify(), notifyAll()是因为 这几个方法是final方法。</code><br>可以为多个线程间建立不同的Condition。</p>
<p>Let's modify the example above and add two behaviours:</p>
<ol>
<li>When the balance falls below 100, deposit 100 immediately;</li>
<li>When the balance exceeds 200, withdraw 100 immediately;</li>
</ol>
<p>The code is as follows:</p>
<figure class="highlight cs"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br><span class="line">86</span><br><span class="line">87</span><br><span class="line">88</span><br><span class="line">89</span><br><span class="line">90</span><br><span class="line">91</span><br><span class="line">92</span><br><span class="line">93</span><br><span class="line">94</span><br><span class="line">95</span><br><span class="line">96</span><br><span class="line">97</span><br><span class="line">98</span><br><span class="line">99</span><br><span class="line">100</span><br><span class="line">101</span><br><span class="line">102</span><br><span class="line">103</span><br><span class="line">104</span><br><span class="line">105</span><br><span class="line">106</span><br><span class="line">107</span><br><span class="line">108</span><br><span 
class="line">109</span><br><span class="line">110</span><br><span class="line">111</span><br><span class="line">112</span><br><span class="line">113</span><br><span class="line">114</span><br><span class="line">115</span><br><span class="line">116</span><br><span class="line">117</span><br><span class="line">118</span><br><span class="line">119</span><br><span class="line">120</span><br><span class="line">121</span><br><span class="line">122</span><br><span class="line">123</span><br><span class="line">124</span><br><span class="line">125</span><br><span class="line">126</span><br><span class="line">127</span><br><span class="line">128</span><br><span class="line">129</span><br><span class="line">130</span><br><span class="line">131</span><br><span class="line">132</span><br><span class="line">133</span><br><span class="line">134</span><br><span class="line">135</span><br><span class="line">136</span><br><span class="line">137</span><br><span class="line">138</span><br><span class="line">139</span><br><span class="line">140</span><br><span class="line">141</span><br><span class="line">142</span><br><span class="line">143</span><br><span class="line">144</span><br><span class="line">145</span><br><span class="line">146</span><br><span class="line">147</span><br><span class="line">148</span><br><span class="line">149</span><br><span class="line">150</span><br><span class="line">151</span><br><span class="line">152</span><br><span class="line">153</span><br><span class="line">154</span><br><span class="line">155</span><br><span class="line">156</span><br><span class="line">157</span><br><span class="line">158</span><br><span class="line">159</span><br><span class="line">160</span><br><span class="line">161</span><br><span class="line">162</span><br><span class="line">163</span><br><span class="line">164</span><br><span class="line">165</span><br><span class="line">166</span><br><span class="line">167</span><br><span class="line">168</span><br><span class="line">169</span><br><span class="line">170</span><br><span class="line">171</span><br><span class="line">172</span><br><span class="line">173</span><br><span class="line">174</span><br><span class="line">175</span><br><span class="line">176</span><br><span class="line">177</span><br><span class="line">178</span><br><span class="line">179</span><br><span class="line">180</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title">ConditionStudy</span> {</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">Account</span> {</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">long</span> balance;</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span> Lock <span class="keyword">lock</span> = <span class="keyword">new</span> ReentrantLock();</span><br><span class="line"> <span class="keyword">private</span> Condition condition100 = <span class="keyword">lock</span>.newCondition();</span><br><span class="line"> <span class="keyword">private</span> Condition condition200 = <span class="keyword">lock</span>.newCondition();</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">put</span>(<span class="params"><span 
class="keyword">long</span> money</span>) </span>{</span><br><span class="line"> <span class="keyword">lock</span>.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> balance += money;</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"存入:"</span> + money + <span class="string">",余额:"</span> + balance + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> condition200.signal();</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> <span class="keyword">lock</span>.unlock();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">take</span>(<span class="params"><span class="keyword">long</span> money</span>) </span>{</span><br><span class="line"> <span class="keyword">lock</span>.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> <span class="keyword">if</span> (balance < money)</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"余额不足, 需取: "</span> + money + <span class="string">"; 余额: "</span> + balance + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">else</span> {</span><br><span class="line"> balance -= money;</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"取出:"</span> + money + <span class="string">",余额:"</span> + balance + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> condition100.signal();</span><br><span class="line"> }</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> <span class="keyword">lock</span>.unlock();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">put100</span>(<span class="params"></span>) </span>{</span><br><span class="line"> <span class="keyword">lock</span>.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> <span class="keyword">while</span> (balance >= <span class="number">100</span>) {</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span 
class="string">"余额大于等于100, 等待, 释放锁"</span> + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> condition100.await();</span><br><span class="line"> }</span><br><span class="line"> balance += <span class="number">100</span>;</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"存入100, 余额:"</span> + balance + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> <span class="keyword">lock</span>.unlock();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">take100</span>(<span class="params"></span>) </span>{</span><br><span class="line"> <span class="keyword">lock</span>.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> <span class="keyword">while</span> (balance < <span class="number">200</span>) {</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"余额小于200, 等待, 释放锁"</span> + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> condition200.await();</span><br><span class="line"> }</span><br><span class="line"> balance -= <span class="number">100</span>;</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"取出100, 余额:"</span> + balance + <span class="string">" -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> <span class="keyword">lock</span>.unlock();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"释放lock -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">PutThread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">PutThread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span 
class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> Random random = <span class="keyword">new</span> Random();</span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>) {</span><br><span class="line"> <span class="keyword">int</span> money = random.nextInt(<span class="number">100</span>);</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> account.put(money);</span><br><span class="line"> sleep(<span class="number">1000</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">TakeThread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">TakeThread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> Random random = <span class="keyword">new</span> Random();</span><br><span class="line"> <span class="keyword">int</span> i = <span class="number">0</span>;</span><br><span class="line"> <span class="keyword">while</span> (i++ < <span class="number">10</span>) {</span><br><span class="line"> <span class="keyword">int</span> money = random.nextInt(<span class="number">100</span>);</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> account.take(money);</span><br><span class="line"> sleep(<span class="number">1000</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">Put100Thread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">Put100Thread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span 
class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>) {</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> account.put100();</span><br><span class="line"> sleep(<span class="number">1000</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">Take100Thread</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> Account account;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">Take100Thread</span>(<span class="params">Account account</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.account = account;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>) {</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> account.take100();</span><br><span class="line"> sleep(<span class="number">1000</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span>(<span class="params">String args[]</span>) </span>{</span><br><span class="line"> Account account = <span class="keyword">new</span> Account();</span><br><span class="line"> PutThread putThread = <span class="keyword">new</span> PutThread(account);</span><br><span class="line"> putThread.setName(<span class="string">"putThread"</span>);</span><br><span class="line"> TakeThread takeThread = <span class="keyword">new</span> TakeThread(account);</span><br><span class="line"> takeThread.setName(<span class="string">"takeThread"</span>);</span><br><span class="line"> Put100Thread put100Thread = <span class="keyword">new</span> Put100Thread(account);</span><br><span class="line"> put100Thread.setName(<span class="string">"put100Thread"</span>);</span><br><span class="line"> Take100Thread take100Thread = <span class="keyword">new</span> Take100Thread(account);</span><br><span class="line"> take100Thread.setName(<span class="string">"take100Thread"</span>);</span><br><span class="line"></span><br><span class="line"> putThread.start();</span><br><span class="line"> takeThread.start();</span><br><span class="line"> put100Thread.start();</span><br><span class="line"> take100Thread.start();</span><br><span class="line"> 
}</span><br><span class="line"></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<h2 id="volatile"><a href="#volatile" class="headerlink" title="volatile"></a>volatile</h2><p>volatile可以保证每次都从主内存中读取数据,且每次数据修改都写回主内存。如果一个变量声明为volatile,那么编译器和虚拟机就知道该域是可能被另一个线程并发更新的。</p>
<h2 id="死锁"><a href="#死锁" class="headerlink" title="死锁"></a>死锁</h2><p>死锁是指多个线程相互等待它方占有的资源而导致的每个线程都无法执行下去的情况。<br>下面是一个简单的死锁例子:</p>
<figure class="highlight cs"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title">DeadLock</span> {</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">MemStore</span> {</span><br><span class="line"></span><br><span class="line"> Lock lock1 = <span class="keyword">new</span> ReentrantLock();</span><br><span class="line"> Lock lock2 = <span class="keyword">new</span> ReentrantLock();</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">method1</span>(<span class="params"></span>) </span>{</span><br><span class="line"> lock1.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span 
class="keyword">out</span>.println(<span class="string">"获得lock1 -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Thread.sleep(<span class="number">1000</span>);</span><br><span class="line"> lock2.<span class="keyword">lock</span>();</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock2 -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> lock2.unlock();</span><br><span class="line"> }</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> lock1.unlock();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">method2</span>(<span class="params"></span>) </span>{</span><br><span class="line"> lock2.<span class="keyword">lock</span>();</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock2 -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Thread.sleep(<span class="number">1000</span>);</span><br><span class="line"> lock1.<span class="keyword">lock</span>();</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"获得lock1 -- "</span> + Thread.currentThread().getName());</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> lock1.unlock();</span><br><span class="line"> }</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> } <span class="keyword">finally</span> {</span><br><span class="line"> lock2.unlock();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> <span class="keyword">class</span> <span class="title">Thread1</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> MemStore memStore;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">Thread1</span>(<span class="params">MemStore memStore</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.memStore = memStore;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>)</span>{</span><br><span class="line"> memStore.method1();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">static</span> 
<span class="keyword">class</span> <span class="title">Thread2</span> <span class="title">extends</span> <span class="title">Thread</span> {</span><br><span class="line"> <span class="keyword">private</span> MemStore memStore;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">Thread2</span>(<span class="params">MemStore memStore</span>) </span>{</span><br><span class="line"> <span class="keyword">this</span>.memStore = memStore;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> @<span class="function">Override</span><br><span class="line"> <span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>)</span>{</span><br><span class="line"> memStore.method2();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span>(<span class="params">String args[]</span>) </span>{</span><br><span class="line"> MemStore memStore = <span class="keyword">new</span> MemStore();</span><br><span class="line"> Thread1 thread1 = <span class="keyword">new</span> Thread1(memStore);</span><br><span class="line"> thread1.setName(<span class="string">"thread1"</span>);</span><br><span class="line"> Thread2 thread2 = <span class="keyword">new</span> Thread2(memStore);</span><br><span class="line"> thread2.setName(<span class="string">"thread2"</span>);</span><br><span class="line"> thread1.start();</span><br><span class="line"> thread2.start();</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>Output:</p>
<figure class="highlight applescript"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">获得lock1 <span class="comment">-- thread1</span></span><br><span class="line">获得lock2 <span class="comment">-- thread2</span></span><br></pre></td></tr></table></figure>
<p>As the output shows, thread1 acquired lock1 and is waiting for lock2, while thread2 acquired lock2 and is waiting for lock1, so the two threads deadlock.</p>
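<p>The standard cure is a global lock ordering: if every thread acquires lock1 before lock2, the circular wait can never form. The sketch below (an illustrative rewrite of method2, not part of the original example) applies that rule; Lock.tryLock with a timeout is another common way out:</p>
<figure class="highlight java"><table><tr><td class="code"><pre>
// Illustrative fix: method2 now takes the locks in the same order as method1,
// so the two threads can no longer hold one lock each and wait for the other.
public void method2() {
    lock1.lock();                        // same global order: lock1 first
    try {
        lock2.lock();                    // then lock2
        try {
            System.out.println("got lock1 and lock2 -- " + Thread.currentThread().getName());
        } finally {
            lock2.unlock();
        }
    } finally {
        lock1.unlock();
    }
}
</pre></td></tr></table></figure>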
<h1 id="线程池"><a href="#线程池" class="headerlink" title="线程池"></a>线程池</h1><p>多线程技术主要解决处理器单元内多个线程执行的问题,它可以显著减少处理器单元的闲置时间,增加处理器单元的吞吐能力。</p>
<h2 id="线程池原理"><a href="#线程池原理" class="headerlink" title="线程池原理"></a>线程池原理</h2><p>假设一个服务器完成一项任务所需时间为:T1 创建线程时间,T2 在线程中执行任务的时间,T3 销毁线程时间。如果:T1 + T3 远大于 T2,则可以采用线程池,以提高服务器性能。</p>
<p>A thread pool has four basic components:</p>
<ol>
<li>Pool manager (ThreadPool): creates and manages the pool, including creating the pool, destroying it, and adding new tasks;</li>
<li>Worker threads (PoolWorker): the threads in the pool; they wait while there is no task and can execute tasks in a loop;</li>
<li>Task interface (Task): the interface every task must implement so that worker threads can schedule its execution; it mainly defines the task's entry point, the cleanup after the task finishes, the task's execution state, and so on;</li>
<li>Task queue (taskQueue): holds tasks that have not been processed yet, providing a buffering mechanism.</li>
</ol>
<p>Thread-pool technology is precisely about shortening or rescheduling the T1 and T3 costs to improve server performance: it moves T1 and T3 to the server's startup and shutdown phases, or to otherwise idle periods, so that no T1 or T3 overhead is paid while the server is handling client requests. A rough way to see how large that overhead can be is sketched below.</p>
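<p>The following micro-benchmark (an illustrative sketch, not from the original post; absolute numbers vary by machine and JVM) makes the T1 + T3 cost visible by creating, running, and joining a fresh thread for each tiny task:</p>
<figure class="highlight java"><table><tr><td class="code"><pre>
public class CreationCost {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        for (int i = 0; i < 1000; i++) {
            Thread t = new Thread(() -> { /* tiny task: T2 is negligible */ });
            t.start();
            t.join();                     // include teardown (T3) in the timing
        }
        System.out.println("1000 create/run/destroy cycles: "
                + (System.nanoTime() - start) / 1_000_000 + " ms");
    }
}
</pre></td></tr></table></figure>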
<h2 id="简单线程池的实现"><a href="#简单线程池的实现" class="headerlink" title="简单线程池的实现"></a>简单线程池的实现</h2><figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">SimpleThreadPool</span> </span>{</span><br><span class="line"></span><br><span class="line"> <span class="comment">// 线程数</span></span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">int</span> threadNum = <span class="number">10</span>;</span><br><span class="line"> <span class="comment">// 工作线程</span></span><br><span class="line"> <span class="keyword">private</span> WorkThread[] workThreads;</span><br><span class="line"> <span class="comment">// 任务队列, 待执行的线程</span></span><br><span class="line"> <span class="keyword">private</span> 
BlockingQueue<Runnable> taskQueue;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">SimpleThreadPool</span><span class="params">(<span class="keyword">int</span> threadNum)</span> </span>{</span><br><span class="line"> <span class="keyword">if</span> (threadNum > <span class="number">0</span>)</span><br><span class="line"> <span class="keyword">this</span>.threadNum = threadNum;</span><br><span class="line"> <span class="comment">// 初始化任务队列</span></span><br><span class="line"> taskQueue = <span class="keyword">new</span> LinkedBlockingDeque<>();</span><br><span class="line"> <span class="comment">// 初始化工作线程</span></span><br><span class="line"> workThreads = <span class="keyword">new</span> WorkThread[<span class="keyword">this</span>.threadNum];</span><br><span class="line"> <span class="keyword">int</span> i = <span class="number">0</span>;</span><br><span class="line"> <span class="keyword">while</span> (i < threadNum) {</span><br><span class="line"> workThreads[i] = <span class="keyword">new</span> WorkThread();</span><br><span class="line"> workThreads[i].setName(<span class="string">"workThread-"</span> + i);</span><br><span class="line"> <span class="comment">// 启动工作线程</span></span><br><span class="line"> workThreads[i].start();</span><br><span class="line"> i++;</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">execute</span><span class="params">(Runnable runnable)</span> </span>{</span><br><span class="line"> taskQueue.add(runnable);</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">execute</span><span class="params">(Runnable[] runnableList)</span> </span>{</span><br><span class="line"> <span class="keyword">for</span> (Runnable runnable : runnableList)</span><br><span class="line"> execute(runnable);</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">destroy</span><span class="params">()</span></span>{</span><br><span class="line"> <span class="keyword">for</span>(WorkThread workThread: workThreads)</span><br><span class="line"> workThread.stopRun();</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="class"><span class="keyword">class</span> <span class="title">WorkThread</span> <span class="keyword">extends</span> <span class="title">Thread</span> </span>{</span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">volatile</span> <span class="keyword">boolean</span> runFlag = <span class="keyword">true</span>;</span><br><span class="line"></span><br><span class="line"> <span class="meta">@Override</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">while</span> (runFlag)</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Runnable task = taskQueue.take();</span><br><span 
class="line"> task.run();</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> e.printStackTrace();</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">stopRun</span><span class="params">()</span> </span>{</span><br><span class="line"> runFlag = <span class="keyword">false</span>;</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">static</span> <span class="class"><span class="keyword">class</span> <span class="title">Task</span> <span class="keyword">implements</span> <span class="title">Runnable</span> </span>{</span><br><span class="line"></span><br><span class="line"> <span class="keyword">private</span> String name;</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="title">Task</span><span class="params">(String name)</span> </span>{</span><br><span class="line"> <span class="keyword">this</span>.name = name;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="meta">@Override</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>{</span><br><span class="line"> System.out.println(name + <span class="string">" run."</span> + <span class="string">"current thread: "</span> + Thread.currentThread().getName());</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span><span class="params">(String args[])</span> <span class="keyword">throws</span> InterruptedException </span>{</span><br><span class="line"> SimpleThreadPool simpleThreadPool = <span class="keyword">new</span> SimpleThreadPool(<span class="number">3</span>);</span><br><span class="line"> simpleThreadPool.execute(<span class="keyword">new</span> Task(<span class="string">"task0"</span>));</span><br><span class="line"> simpleThreadPool.execute(<span class="keyword">new</span> Runnable[]{<span class="keyword">new</span> Task(<span class="string">"task1"</span>), <span class="keyword">new</span> Task(<span class="string">"task2"</span>), <span class="keyword">new</span> Task(<span class="string">"task3"</span>), <span class="keyword">new</span> Task(<span class="string">"task4"</span>)});</span><br><span class="line"> simpleThreadPool.execute(<span class="keyword">new</span> Runnable[]{<span class="keyword">new</span> Task(<span class="string">"task4"</span>), <span class="keyword">new</span> Task(<span class="string">"task5"</span>), <span class="keyword">new</span> Task(<span class="string">"task6"</span>)});</span><br><span class="line"> Thread.sleep(<span class="number">1000</span>);</span><br><span class="line"> simpleThreadPool.destroy();</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>Output (order not fixed):</p>
<figure class="highlight stylus"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">task0 run<span class="selector-class">.current</span> thread: workThread-<span class="number">0</span></span><br><span class="line">task2 run<span class="selector-class">.current</span> thread: workThread-<span class="number">2</span></span><br><span class="line">task1 run<span class="selector-class">.current</span> thread: workThread-<span class="number">1</span></span><br><span class="line">task3 run<span class="selector-class">.current</span> thread: workThread-<span class="number">0</span></span><br><span class="line">task4 run<span class="selector-class">.current</span> thread: workThread-<span class="number">2</span></span><br><span class="line">task4 run<span class="selector-class">.current</span> thread: workThread-<span class="number">1</span></span><br><span class="line">task5 run<span class="selector-class">.current</span> thread: workThread-<span class="number">0</span></span><br><span class="line">task6 run<span class="selector-class">.current</span> thread: workThread-<span class="number">2</span></span><br></pre></td></tr></table></figure>
<h1 id="concurrent包"><a href="#concurrent包" class="headerlink" title="concurrent包"></a>concurrent包</h1><p>java.util.concurrent包是在并发编程中很常用的实用工具包。此包包括了几个小的、已标准化的可扩展框架,以及一些提供有用功能的类,没有这些类,这些功能会很难实现或实现起来冗长乏味。下面简要描述主要的组件。</p>
<h2 id="执行器"><a href="#执行器" class="headerlink" title="执行器"></a>执行器</h2><p>接口。Executor是一个简单的标准化接口,用于定义类似于线程的自定义子系统,包括线程池、异步IO和轻量级任务框架。根据所使用的具体Executor类的不同,可能在新创建的线程中,现有的任务执行线程中,或者调用execute()的线程中执行任务,并且可能顺序或并发执行。ExecutorService 提供了多个完整的异步任务执行框架。ExecutorService 管理任务的排队和安排,并允许受控制的关闭。ScheduledExecutorService 子接口及相关的接口添加了对延迟的和定期任务执行的支持。ExecutorService 提供了安排异步执行的方法,可执行由 Callable 表示的任何函数,结果类似于 Runnable。Future 返回函数的结果,允许确定执行是否完成,并提供取消执行的方法。RunnableFuture 是拥有 run 方法的 Future,run 方法执行时将设置其结果。</p>
<p>Implementations. The ThreadPoolExecutor and ScheduledThreadPoolExecutor classes provide tunable, flexible thread pools. The Executors class provides factory methods for the most common kinds and configurations of Executor, along with a few utility methods for using them. Other Executor-based utilities include the concrete class FutureTask, a common extensible implementation of Future, and ExecutorCompletionService, which assists in coordinating the processing of groups of asynchronous tasks.</p>
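<p>A small sketch of the implementation side (all pool sizes and delays are illustrative):</p>
<figure class="highlight java"><table><tr><td class="code"><pre>
import java.util.concurrent.*;

public class TunedPoolDemo {
    public static void main(String[] args) {
        // A directly constructed, tunable pool.
        ThreadPoolExecutor tuned = new ThreadPoolExecutor(
                2, 4,                        // core and maximum pool size
                60L, TimeUnit.SECONDS,       // keep-alive for threads above core size
                new LinkedBlockingQueue<Runnable>());
        tuned.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        tuned.shutdown();

        // A scheduled pool from the Executors factory: run a task after a delay.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        scheduler.schedule(() -> System.out.println("runs after 1 second"),
                1, TimeUnit.SECONDS);
        scheduler.shutdown();  // by default, already-scheduled delayed tasks still run
    }
}
</pre></td></tr></table></figure>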
<h2 id="队列"><a href="#队列" class="headerlink" title="队列"></a>队列</h2><p>java.util.concurrent.ConcurrentLinkedQueue类提供了高效的、可伸缩的、线程安全的非阻塞 FIFO 队列。java.util.concurrent 中的五个实现都支持扩展的 BlockingQueue 接口,该接口定义了 put 和 take 的阻塞版本:LinkedBlockingQueue、ArrayBlockingQueue、SynchronousQueue、PriorityBlockingQueue 和 DelayQueue。这些不同的类覆盖了生产者-使用者、消息传递、并行任务执行和相关并发设计的大多数常见使用的上下文。BlockingDeque 接口扩展 BlockingQueue,以支持 FIFO 和 LIFO(基于堆栈)操作。LinkedBlockingDeque 类提供一个实现。</p>
<h2 id="计时"><a href="#计时" class="headerlink" title="计时"></a>计时</h2><p>TimeUnit 类为指定和控制基于超时的操作提供了多重粒度(包括纳秒级)。该包中的大多数类除了包含不确定的等待之外,还包含基于超时的操作。在使用超时的所有情况中,超时指定了在表明已超时前该方法应该等待的最少时间。在超时发生后,实现会“尽力”检测超时。但是,在检测超时与超时之后再次实际执行线程之间可能要经过不确定的时间。接受超时期参数的所有方法将小于等于 0 的值视为根本不会等待。要“永远”等待,可以使用 Long.MAX_VALUE 值。</p>
<h2 id="同步器"><a href="#同步器" class="headerlink" title="同步器"></a>同步器</h2><p>四个类可协助实现常见的专用同步语句。Semaphore 是一个经典的并发工具。CountDownLatch 是一个极其简单但又极其常用的实用工具,用于在保持给定数目的信号、事件或条件前阻塞执行。CyclicBarrier 是一个可重置的多路同步点,在某些并行编程风格中很有用。Exchanger 允许两个线程在 collection 点交换对象,它在多流水线设计中是有用的。</p>
<h2 id="并发-Collection"><a href="#并发-Collection" class="headerlink" title="并发 Collection"></a>并发 Collection</h2><p>除队列外,此包还提供了设计用于多线程上下文中的 Collection 实现:ConcurrentHashMap、ConcurrentSkipListMap、ConcurrentSkipListSet、CopyOnWriteArrayList 和 CopyOnWriteArraySet。当期望许多线程访问一个给定 collection 时,ConcurrentHashMap 通常优于同步的 HashMap,ConcurrentSkipListMap 通常优于同步的 TreeMap。当期望的读数和遍历远远大于列表的更新数时,CopyOnWriteArrayList 优于同步的 ArrayList。<br>此包中与某些类一起使用的“Concurrent&rdquo前缀;是一种简写,表明与类似的“同步”类有所不同。例如,java.util.Hashtable 和 Collections.synchronizedMap(new HashMap()) 是同步的,但 ConcurrentHashMap 则是“并发的”。并发 collection 是线程安全的,但是不受单个排他锁的管理。在 ConcurrentHashMap 这一特定情况下,它可以安全地允许进行任意数目的并发读取,以及数目可调的并发写入。需要通过单个锁不允许对 collection 的所有访问时,“同步”类是很有用的,其代价是较差的可伸缩性。在期望多个线程访问公共 collection 的其他情况中,通常“并发”版本要更好一些。当 collection 是未共享的,或者仅保持其他锁时 collection 是可访问的情况下,非同步 collection 则要更好一些。</p>
<p>Most concurrent Collection implementations (including most Queues) also differ from the usual java.util conventions in that their iterators provide weakly consistent rather than fail-fast traversal. A weakly consistent iterator is thread-safe, but does not necessarily freeze the collection while iterating, so it may (or may not) reflect updates made since the iterator was created.</p>
<h2 id="内存一致性属性"><a href="#内存一致性属性" class="headerlink" title="内存一致性属性"></a>内存一致性属性</h2><p>只有写入操作 happen-before 读取操作时,才保证一个线程写入的结果对另一个线程的读取是可视的。synchronized 和 volatile 构造 happen-before 关系,Thread.start() 和 Thread.join() 方法形成 happen-before 关系。尤其是:<br>线程中的每个操作 happen-before 稍后按程序顺序传入的该线程中的每个操作。<br>一个解除锁监视器的(synchronized 阻塞或方法退出)happen-before 相同监视器的每个后续锁(synchronized 阻塞或方法进入)。并且因为 happen-before 关系是可传递的,所以解除锁定之前的线程的所有操作 happen-before 锁定该监视器的任何线程后续的所有操作。<br>写入 volatile 字段 happen-before 每个后续读取相同字段。volatile 字段的读取和写入与进入和退出监视器具有相似的内存一致性效果,但不 需要互斥锁。<br>在线程上调用 start happen-before 已启动的线程中的任何线程。<br>线程中的所有操作 happen-before 从该线程上的 join 成功返回的任何其他线程。<br>java.util.concurrent 中所有类的方法及其子包扩展了这些对更高级别同步的保证。尤其是:<br>线程中将一个对象放入任何并发 collection 之前的操作 happen-before 从另一线程中的 collection 访问或移除该元素的后续操作。<br>线程中向 Executor 提交 Runnable 之前的操作 happen-before 其执行开始。同样适用于向 ExecutorService 提交 Callables。<br>异步计算(由 Future 表示)所采取的操作 happen-before 通过另一线程中 Future.get() 获取结果后续的操作。<br>“释放”同步储存方法(如 Lock.unlock、Semaphore.release 和 CountDownLatch.countDown)之前的操作 happen-before 另一线程中相同同步储存对象成功“获取”方法(如 Lock.lock、Semaphore.acquire、Condition.await 和 CountDownLatch.await)的后续操作。<br>对于通过 Exchanger 成功交换对象的每个线程对,每个线程中 exchange() 之前的操作 happen-before 另一线程中对应 exchange() 后续的操作。<br>调用 CyclicBarrier.await 之前的操作 happen-before 屏障操作所执行的操作,屏障操作所执行的操作 happen-before 从另一线程中对应 await 成功返回的后续操作。</p>
<h1 id="关于线程个数"><a href="#关于线程个数" class="headerlink" title="关于线程个数"></a>关于线程个数</h1><p>我们的应用程序应该创建多少个线程会使得性能得到比较好的提升呢?以下代码可以计算一个合适的线程个数:</p>
<figure class="highlight cs"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br><span class="line">31</span><br><span class="line">32</span><br><span class="line">33</span><br><span class="line">34</span><br><span class="line">35</span><br><span class="line">36</span><br><span class="line">37</span><br><span class="line">38</span><br><span class="line">39</span><br><span class="line">40</span><br><span class="line">41</span><br><span class="line">42</span><br><span class="line">43</span><br><span class="line">44</span><br><span class="line">45</span><br><span class="line">46</span><br><span class="line">47</span><br><span class="line">48</span><br><span class="line">49</span><br><span class="line">50</span><br><span class="line">51</span><br><span class="line">52</span><br><span class="line">53</span><br><span class="line">54</span><br><span class="line">55</span><br><span class="line">56</span><br><span class="line">57</span><br><span class="line">58</span><br><span class="line">59</span><br><span class="line">60</span><br><span class="line">61</span><br><span class="line">62</span><br><span class="line">63</span><br><span class="line">64</span><br><span class="line">65</span><br><span class="line">66</span><br><span class="line">67</span><br><span class="line">68</span><br><span class="line">69</span><br><span class="line">70</span><br><span class="line">71</span><br><span class="line">72</span><br><span class="line">73</span><br><span class="line">74</span><br><span class="line">75</span><br><span class="line">76</span><br><span class="line">77</span><br><span class="line">78</span><br><span class="line">79</span><br><span class="line">80</span><br><span class="line">81</span><br><span class="line">82</span><br><span class="line">83</span><br><span class="line">84</span><br><span class="line">85</span><br><span class="line">86</span><br><span class="line">87</span><br><span class="line">88</span><br><span class="line">89</span><br><span class="line">90</span><br><span class="line">91</span><br><span class="line">92</span><br><span class="line">93</span><br><span class="line">94</span><br><span class="line">95</span><br><span class="line">96</span><br><span class="line">97</span><br><span class="line">98</span><br><span class="line">99</span><br><span class="line">100</span><br><span class="line">101</span><br><span class="line">102</span><br><span class="line">103</span><br><span class="line">104</span><br><span class="line">105</span><br><span class="line">106</span><br><span class="line">107</span><br><span class="line">108</span><br><span 
class="line">109</span><br><span class="line">110</span><br><span class="line">111</span><br><span class="line">112</span><br><span class="line">113</span><br><span class="line">114</span><br><span class="line">115</span><br><span class="line">116</span><br><span class="line">117</span><br><span class="line">118</span><br><span class="line">119</span><br><span class="line">120</span><br><span class="line">121</span><br><span class="line">122</span><br><span class="line">123</span><br><span class="line">124</span><br><span class="line">125</span><br><span class="line">126</span><br><span class="line">127</span><br><span class="line">128</span><br><span class="line">129</span><br><span class="line">130</span><br><span class="line">131</span><br><span class="line">132</span><br><span class="line">133</span><br><span class="line">134</span><br><span class="line">135</span><br><span class="line">136</span><br><span class="line">137</span><br><span class="line">138</span><br><span class="line">139</span><br><span class="line">140</span><br><span class="line">141</span><br><span class="line">142</span><br><span class="line">143</span><br><span class="line">144</span><br><span class="line">145</span><br><span class="line">146</span><br><span class="line">147</span><br><span class="line">148</span><br><span class="line">149</span><br><span class="line">150</span><br><span class="line">151</span><br><span class="line">152</span><br><span class="line">153</span><br><span class="line">154</span><br><span class="line">155</span><br><span class="line">156</span><br><span class="line">157</span><br><span class="line">158</span><br><span class="line">159</span><br><span class="line">160</span><br><span class="line">161</span><br><span class="line">162</span><br><span class="line">163</span><br><span class="line">164</span><br><span class="line">165</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">abstract</span> <span class="keyword">class</span> <span class="title">PoolSizeCalculator</span> {</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * The sample queue size to calculate the size of a single {@link Runnable} element.</span><br><span class="line"> */</span></span><br><span class="line"> <span class="keyword">private</span> final <span class="keyword">int</span> SAMPLE_QUEUE_SIZE = <span class="number">1000</span>;</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Accuracy of test run. It must finish within 20ms of the testTime otherwise we retry the test. 
This could be</span><br><span class="line"> * configurable.</span><br><span class="line"> */</span></span><br><span class="line"> <span class="keyword">private</span> final <span class="keyword">int</span> EPSYLON = <span class="number">20</span>;</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Control variable for the CPU time investigation.</span><br><span class="line"> */</span></span><br><span class="line"> <span class="keyword">private</span> <span class="keyword">volatile</span> boolean expired;</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Time (millis) of the test run in the CPU time calculation.</span><br><span class="line"> */</span></span><br><span class="line"> <span class="keyword">private</span> final <span class="keyword">long</span> testtime = <span class="number">3000</span>;</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Calculates the boundaries of a thread pool for a given {@link Runnable}.</span><br><span class="line"> *</span><br><span class="line"> * @param targetUtilization the desired utilization of the CPUs (0 <= targetUtilization <= 1)</span><br><span class="line"> * @param targetQueueSizeBytes the desired maximum work queue size of the thread pool (bytes)</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">protected</span> <span class="keyword">void</span> <span class="title">calculateBoundaries</span>(<span class="params">BigDecimal targetUtilization, BigDecimal targetQueueSizeBytes</span>) </span>{</span><br><span class="line"> calculateOptimalCapacity(targetQueueSizeBytes);</span><br><span class="line"> Runnable task = createTask();</span><br><span class="line"> start(task);</span><br><span class="line"> start(task); <span class="comment">// warm up phase</span></span><br><span class="line"> <span class="keyword">long</span> cputime = getCurrentThreadCPUTime();</span><br><span class="line"> start(task); <span class="comment">// test intervall</span></span><br><span class="line"> cputime = getCurrentThreadCPUTime() - cputime;</span><br><span class="line"> <span class="keyword">long</span> waittime = (testtime * <span class="number">1000000</span>) - cputime;</span><br><span class="line"> calculateOptimalThreadCount(cputime, waittime, targetUtilization);</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">private</span> <span class="keyword">void</span> <span class="title">calculateOptimalCapacity</span>(<span class="params">BigDecimal targetQueueSizeBytes</span>) </span>{</span><br><span class="line"> <span class="keyword">long</span> mem = calculateMemoryUsage();</span><br><span class="line"> BigDecimal queueCapacity = targetQueueSizeBytes.divide(<span class="keyword">new</span> BigDecimal(mem), RoundingMode.HALF_UP);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Target queue memory usage (bytes): "</span> + targetQueueSizeBytes);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"createTask() produced "</span> + createTask().getClass().getName() + <span class="string">" which took "</span> + mem</span><br><span class="line"> + <span class="string">" bytes in a queue"</span>);</span><br><span 
class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Formula: "</span> + targetQueueSizeBytes + <span class="string">" / "</span> + mem);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"* Recommended queue capacity (bytes): "</span> + queueCapacity);</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * <NAME>' optimal thread count formula, see 'Java Concurrency in Practice' (chapter 8.2)</span><br><span class="line"> *</span><br><span class="line"> * @param cpu cpu time consumed by considered task</span><br><span class="line"> * @param wait wait time of considered task</span><br><span class="line"> * @param targetUtilization target utilization of the system</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">private</span> <span class="keyword">void</span> <span class="title">calculateOptimalThreadCount</span>(<span class="params"><span class="keyword">long</span> cpu, <span class="keyword">long</span> wait, BigDecimal targetUtilization</span>) </span>{</span><br><span class="line"> BigDecimal waitTime = <span class="keyword">new</span> BigDecimal(wait);</span><br><span class="line"> BigDecimal computeTime = <span class="keyword">new</span> BigDecimal(cpu);</span><br><span class="line"> BigDecimal numberOfCPU = <span class="keyword">new</span> BigDecimal(Runtime.getRuntime().availableProcessors());</span><br><span class="line"> BigDecimal optimalthreadcount = numberOfCPU.multiply(targetUtilization).multiply(</span><br><span class="line"> <span class="keyword">new</span> BigDecimal(<span class="number">1</span>).add(waitTime.divide(computeTime, RoundingMode.HALF_UP)));</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Number of CPU: "</span> + numberOfCPU);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Target utilization: "</span> + targetUtilization);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Elapsed time (nanos): "</span> + (testtime * <span class="number">1000000</span>));</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Compute time (nanos): "</span> + cpu);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Wait time (nanos): "</span> + wait);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"Formula: "</span> + numberOfCPU + <span class="string">" * "</span> + targetUtilization + <span class="string">" * (1 + "</span> + waitTime + <span class="string">" / "</span></span><br><span class="line"> + computeTime + <span class="string">")"</span>);</span><br><span class="line"> System.<span class="keyword">out</span>.println(<span class="string">"* Optimal thread count: "</span> + optimalthreadcount);</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Runs the {@link Runnable} over a period defined in {@link #testtime}. 
Based on <NAME>' ideas</span><br><span class="line"> * (http://www.javaspecialists.eu/archive/Issue124.html).</span><br><span class="line"> *</span><br><span class="line"> * @param task the runnable under investigation</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">start</span>(<span class="params">Runnable task</span>) </span>{</span><br><span class="line"> <span class="keyword">long</span> start = <span class="number">0</span>;</span><br><span class="line"> <span class="keyword">int</span> runs = <span class="number">0</span>;</span><br><span class="line"> <span class="keyword">do</span> {</span><br><span class="line"> <span class="keyword">if</span> (++runs > <span class="number">5</span>) {</span><br><span class="line"> <span class="keyword">throw</span> <span class="keyword">new</span> IllegalStateException(<span class="string">"Test not accurate"</span>);</span><br><span class="line"> }</span><br><span class="line"> expired = <span class="literal">false</span>;</span><br><span class="line"> start = System.currentTimeMillis();</span><br><span class="line"> Timer timer = <span class="keyword">new</span> Timer();</span><br><span class="line"> timer.schedule(<span class="keyword">new</span> TimerTask() {</span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span>(<span class="params"></span>) </span>{</span><br><span class="line"> expired = <span class="literal">true</span>;</span><br><span class="line"> }</span><br><span class="line"> }, testtime);</span><br><span class="line"> <span class="keyword">while</span> (!expired) {</span><br><span class="line"> task.run();</span><br><span class="line"> }</span><br><span class="line"> start = System.currentTimeMillis() - start;</span><br><span class="line"> timer.cancel();</span><br><span class="line"> } <span class="keyword">while</span> (Math.abs(start - testtime) > EPSYLON);</span><br><span class="line"> collectGarbage(<span class="number">3</span>);</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">private</span> <span class="keyword">void</span> <span class="title">collectGarbage</span>(<span class="params"><span class="keyword">int</span> times</span>) </span>{</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < times; i++) {</span><br><span class="line"> System.gc();</span><br><span class="line"> <span class="keyword">try</span> {</span><br><span class="line"> Thread.sleep(<span class="number">10</span>);</span><br><span class="line"> } <span class="keyword">catch</span> (InterruptedException e) {</span><br><span class="line"> Thread.currentThread().interrupt();</span><br><span class="line"> <span class="keyword">break</span>;</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Calculates the memory usage of a single element in a work queue. 
Based on <NAME>' ideas</span><br><span class="line"> * (http://www.javaspecialists.eu/archive/Issue029.html).</span><br><span class="line"> *</span><br><span class="line"> * @return memory usage of a single {@link Runnable} element in the thread pools work queue</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">long</span> <span class="title">calculateMemoryUsage</span>(<span class="params"></span>) </span>{</span><br><span class="line"> BlockingQueue<Runnable> queue = createWorkQueue();</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < SAMPLE_QUEUE_SIZE; i++) {</span><br><span class="line"> queue.add(createTask());</span><br><span class="line"> }</span><br><span class="line"> <span class="keyword">long</span> mem0 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();</span><br><span class="line"> <span class="keyword">long</span> mem1 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();</span><br><span class="line"> queue = <span class="literal">null</span>;</span><br><span class="line"> collectGarbage(<span class="number">15</span>);</span><br><span class="line"> mem0 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();</span><br><span class="line"> queue = createWorkQueue();</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < SAMPLE_QUEUE_SIZE; i++) {</span><br><span class="line"> queue.add(createTask());</span><br><span class="line"> }</span><br><span class="line"> collectGarbage(<span class="number">15</span>);</span><br><span class="line"> mem1 = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();</span><br><span class="line"> <span class="keyword">return</span> (mem1 - mem0) / SAMPLE_QUEUE_SIZE;</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Create your runnable task here.</span><br><span class="line"> *</span><br><span class="line"> * @return an instance of your runnable task under investigation</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">protected</span> <span class="keyword">abstract</span> Runnable <span class="title">createTask</span>(<span class="params"></span>)</span>;</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Return an instance of the queue used in the thread pool.</span><br><span class="line"> *</span><br><span class="line"> * @return queue instance</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">protected</span> <span class="keyword">abstract</span> BlockingQueue<Runnable> <span class="title">createWorkQueue</span>(<span class="params"></span>)</span>;</span><br><span class="line"></span><br><span class="line"> <span class="comment">/**</span><br><span class="line"> * Calculate current cpu time. Various frameworks may be used here, depending on the operating system in use. (e.g.</span><br><span class="line"> * http://www.hyperic.com/products/sigar). 
The more accurate the CPU time measurement, the more accurate the results</span><br><span class="line"> * for thread count boundaries.</span><br><span class="line"> *</span><br><span class="line"> * @return current cpu time of current thread</span><br><span class="line"> */</span></span><br><span class="line"> <span class="function"><span class="keyword">protected</span> <span class="keyword">abstract</span> <span class="keyword">long</span> <span class="title">getCurrentThreadCPUTime</span>(<span class="params"></span>)</span>;</span><br><span class="line"></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br><span class="line">20</span><br><span class="line">21</span><br><span class="line">22</span><br><span class="line">23</span><br><span class="line">24</span><br><span class="line">25</span><br><span class="line">26</span><br><span class="line">27</span><br><span class="line">28</span><br><span class="line">29</span><br><span class="line">30</span><br></pre></td><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">MyPoolSizeCalculator</span> <span class="keyword">extends</span> <span class="title">PoolSizeCalculator</span> </span>{</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span><span class="params">(String[] args)</span> <span class="keyword">throws</span> InterruptedException,</span><br><span class="line"> InstantiationException,</span><br><span class="line"> IllegalAccessException,</span><br><span class="line"> ClassNotFoundException </span>{</span><br><span class="line"> MyPoolSizeCalculator calculator = <span class="keyword">new</span> MyPoolSizeCalculator();</span><br><span class="line"> calculator.calculateBoundaries(<span class="keyword">new</span> BigDecimal(<span class="number">1.0</span>),</span><br><span class="line"> <span class="keyword">new</span> BigDecimal(<span class="number">100000</span>));</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">protected</span> <span class="keyword">long</span> <span class="title">getCurrentThreadCPUTime</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">return</span> ManagementFactory.getThreadMXBean().getCurrentThreadCpuTime();</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line"> <span class="function"><span class="keyword">protected</span> Runnable <span class="title">createTask</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">return</span> <span class="keyword">new</span> Runnable() {</span><br><span class="line"> <span class="meta">@Override</span></span><br><span class="line"> <span class="function"><span class="keyword">public</span> <span class="keyword">void</span> <span class="title">run</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">for</span> (<span class="keyword">int</span> i = <span class="number">0</span>; i < <span class="number">10000</span>; i++)</span><br><span class="line"> System.out.println(i);</span><br><span class="line"> }</span><br><span class="line"> };</span><br><span class="line"> }</span><br><span class="line"></span><br><span 
class="line"> <span class="function"><span class="keyword">protected</span> BlockingQueue<Runnable> <span class="title">createWorkQueue</span><span class="params">()</span> </span>{</span><br><span class="line"> <span class="keyword">return</span> <span class="keyword">new</span> LinkedBlockingQueue<>();</span><br><span class="line"> }</span><br><span class="line"></span><br><span class="line">}</span><br></pre></td></tr></table></figure>
<p>Adapt the <code>createTask</code> method to your actual workload.</p>
<p>This produces output similar to:</p>
<figure class="highlight applescript"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line">Number <span class="keyword">of</span> CPU: <span class="number">4</span></span><br><span class="line">Target utilization: <span class="number">1</span></span><br><span class="line">Elapsed <span class="built_in">time</span> (nanos): <span class="number">3000000000</span></span><br><span class="line">Compute <span class="built_in">time</span> (nanos): <span class="number">2017495000</span></span><br><span class="line">Wait <span class="built_in">time</span> (nanos): <span class="number">982505000</span></span><br><span class="line">Formula: <span class="number">4</span> * <span class="number">1</span> * (<span class="number">1</span> + <span class="number">982505000</span> / <span class="number">2017495000</span>)</span><br><span class="line">* Optimal thread <span class="built_in">count</span>: <span class="number">4</span></span><br></pre></td></tr></table></figure>
<p>So in the scenario above, creating 4 threads is about right.</p>
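<p>Note that the printed count of 4 is partly an artifact of BigDecimal arithmetic: divide(divisor, roundingMode) keeps the scale of the dividend, and waitTime has scale 0, so 982505000 / 2017495000 rounds to 0 and the wait/compute term drops out. Evaluated without that rounding, the formula gives 4 × 1 × (1 + 982505000 / 2017495000) ≈ 5.9, suggesting about 6 threads for this mixed workload; for a purely CPU-bound task (wait time near 0) it reduces to roughly one thread per CPU.</p>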
</div><script type="text/javascript" src="/js/share.js?v=0.0.0" async></script><a data-url="http://jackalope.cn/2016/04/19/java-core-thread/" data-id="ciz86h6q6001sk0ekljwwmtxf" class="article-share-link">分享到</a><div class="tags"><a href="/tags/Java/">Java</a><a href="/tags/多线程/">多线程</a></div><div class="post-nav"><a href="/2016/04/19/java-core-io/" class="pre">IO总结</a><a href="/2016/04/14/java-core-object/" class="next">Object类详解</a></div><div id="disqus_thread"><script>var disqus_shortname = 'hedgehog-zowie';
var disqus_identifier = '2016/04/19/java-core-thread/';
var disqus_title = '多线程总结';
var disqus_url = 'http://jackalope.cn/2016/04/19/java-core-thread/';
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();</script><script id="dsq-count-scr" src="//hedgehog-zowie.disqus.com/count.js" async></script></div></div></div></div><div class="pure-u-1-4"><div id="sidebar"><div class="widget"><form action="//www.google.com/search" method="get" accept-charset="utf-8" target="_blank" class="search-form"><input type="text" name="q" maxlength="20" placeholder="Search"/><input type="hidden" name="sitesearch" value="http://jackalope.cn"/></form></div><div class="widget"><div class="widget-title"><i class="fa fa-folder-o"> 分类</i></div><ul class="category-list"><li class="category-list-item"><a class="category-list-link" href="/categories/Flume/">Flume</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/HBase/">HBase</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/Java基础/">Java基础</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/Java总结/">Java总结</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/hadoop/">hadoop</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/hbase/">hbase</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/hexo/">hexo</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/hive/">hive</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/java基础/">java基础</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/js/">js</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/mysql/">mysql</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/shell/">shell</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/spark/">spark</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/spasrk/">spasrk</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/spring/">spring</a></li><li class="category-list-item"><a class="category-list-link" href="/categories/sqoop/">sqoop</a></li></ul></div><div class="widget"><div class="widget-title"><i class="fa fa-star-o"> 标签</i></div><div class="tagcloud"><a href="/tags/jni/" style="font-size: 15px;">jni</a> <a href="/tags/hbase/" style="font-size: 15px;">hbase</a> <a href="/tags/hexo/" style="font-size: 15px;">hexo</a> <a href="/tags/hadoop/" style="font-size: 15px;">hadoop</a> <a href="/tags/HBase/" style="font-size: 15px;">HBase</a> <a href="/tags/hive/" style="font-size: 15px;">hive</a> <a href="/tags/Java/" style="font-size: 15px;">Java</a> <a href="/tags/io/" style="font-size: 15px;">io</a> <a href="/tags/nio/" style="font-size: 15px;">nio</a> <a href="/tags/集合/" style="font-size: 15px;">集合</a> <a href="/tags/java/" style="font-size: 15px;">java</a> <a href="/tags/Flume/" style="font-size: 15px;">Flume</a> <a href="/tags/多线程/" style="font-size: 15px;">多线程</a> <a href="/tags/als-spark-ml/" style="font-size: 15px;">als spark ml</a> <a href="/tags/spark/" style="font-size: 15px;">spark</a> <a href="/tags/shell/" style="font-size: 15px;">shell</a> <a href="/tags/mysql/" style="font-size: 15px;">mysql</a> <a href="/tags/sqoop/" style="font-size: 15px;">sqoop</a> <a href="/tags/spring-spring-data-hadoop/" style="font-size: 15px;">spring spring-data hadoop</a> <a href="/tags/js/" style="font-size: 15px;">js</a> <a href="/tags/js-dataTables/" style="font-size: 15px;">js dataTables</a></div></div><div 
class="widget"><div class="widget-title"><i class="fa fa-file-o"> 最新文章</i></div><ul class="post-list"><li class="post-list-item"><a class="post-list-link" href="/2016/10/14/flume-trouble-shooting/">flume-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/10/14/spring-trouble-shooting/">sqoop-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/10/14/sqoop-trouble-shooting/">sqoop-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/10/14/shell-trouble-shooting/">shell-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/10/14/spark-trouble-shooting/">spark-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/09/27/hive-trouble-shooting/">hive-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/09/27/hbase-trouble-shooting/">hbase-trouble-shooting</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/09/23/hbase-tips/">hbase-tips</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/09/23/spark-tips/">spark-tips</a></li><li class="post-list-item"><a class="post-list-link" href="/2016/09/23/spark-ml-als/">ALS矩阵分解算法</a></li></ul></div><div class="widget"><div class="widget-title"><i class="fa fa-comment-o"> 最近评论</i></div><script type="text/javascript" src="//hedgehog-zowie.disqus.com/recent_comments_widget.js?num_items=5&hide_avatars=1&avatar_size=32&excerpt_length=20&hide_mods=1"></script></div><div class="widget"><div class="widget-title"><i class="fa fa-external-link"> 友情链接</i></div><ul></ul><a href="https://www.haomwei.com/" title="屠城" target="_blank">屠城</a></div></div></div><div class="pure-u-1 pure-u-md-3-4"><div id="footer">© <a href="/." rel="nofollow">破而后立.</a> Powered by<a rel="nofollow" target="_blank" href="https://hexo.io"> Hexo.</a><a rel="nofollow" target="_blank" href="https://github.com/tufu9441/maupassant-hexo"> Theme</a> by<a rel="nofollow" target="_blank" href="https://github.com/pagecho"> Cho.</a></div></div></div><a id="rocket" href="#top" class="show"></a><script type="text/javascript" src="/js/totop.js?v=0.0.0" async></script><script type="text/javascript" src="//cdn.bootcss.com/fancybox/2.1.5/jquery.fancybox.pack.js" async></script><script type="text/javascript" src="/js/fancybox.js?v=0.0.0" async></script><link rel="stylesheet" type="text/css" href="/css/jquery.fancybox.css?v=0.0.0"><script type="text/javascript" src="/js/codeblock-resizer.js?v=0.0.0"></script><script type="text/javascript" src="/js/smartresize.js?v=0.0.0"></script></div></body></html> | a19cbe3028f3c26e79a910d87ae53be9084f1c07 | [
"HTML"
] | 1 | HTML | hedgehog-zowie/hedgehog-zowie.github.io | c902d8ae7ddf64e4484dec8c8edaf7636f11fcd2 | db6c727f996b1c492f861263a93c92c43de18478 |
refs/heads/master | <file_sep>
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="index.css">
<title>
Wind turbine item inspection
</title>
</head>
<body >
<h1>
Turbine Safety check
</h1>
<h3>
Wind turbine item inspection
</h3>
<form method="post">
<input type="submit" name="welcomeButton"
class="button" value="Welcome Message" />
<input type="submit" name="resultsButton"
class="button" value="Results" />
</form>
<?php
if(array_key_exists('welcomeButton', $_POST)) {
welcomeButton();
}
else if(array_key_exists('resultsButton', $_POST)) {
resultsButton();
}
function welcomeButton() {
echo "<h3>HI! My Name is <NAME>,
\n I hope you like my presentation<h3>";
}
function resultsButton() {
for ($i = 1; $i <= 100; $i++) {
$result = "";
if($i % 3 === 0) {$result .= "Coating Damage";}
if($i % 5 === 0) {
if ($result === "") {$result .= "Lightning Strike";}
else {$result .= " and Lightning Strike";}
}
if($result === "") { $result = $i;}
print "<p>$result</p>";
}
}
?>
</body>
</html> | e06ff67e5849f56534023d28cd372317df8c7417 | [
"PHP"
] | 1 | PHP | rhysbowenharries/safety-inspection | 85640a139317e0b26804e756b34e8f1c97481117 | 1a6e8261be60eb2a7cc36d50f45d6be8fc0d5668 |
refs/heads/master | <file_sep># Glitch-Garden
2D Unity Game
<file_sep>using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class AttackerSpawner : MonoBehaviour
{
[SerializeField] bool canSpawn = true;
[SerializeField] float minSpawnDelay = 1f;
[SerializeField] float maxSpawnDelay = 5f;
[SerializeField] Attacker[] attackerPrefabArray;
// Start is called before the first frame update
IEnumerator Start()
{
while (canSpawn)
{
yield return new WaitForSeconds(Random.Range(minSpawnDelay, maxSpawnDelay));
SpawnAttacker();
}
}
private void SpawnAttacker()
{
var newAttacker = Random.Range(0, attackerPrefabArray.Length);
Spawn(attackerPrefabArray[newAttacker]);
}
// Update is called once per frame
private void Spawn(Attacker attacker)
{
Attacker newAttacker = Instantiate(attacker, transform.position, transform.rotation) as Attacker;
newAttacker.transform.parent = transform; // spawn a new attacker as a child to the game object which instantiated it
}
}
| 9b318c4a6dfe93ba3f05af141beff15b886a56bd | [
"C#",
"Markdown"
] | 2 | C# | sdmccoin/Glitch-Garden | 4a6949b0dde836d194ba689ad2cde6c8b94a080e | e8004aaa38b0659e7edd29023bb757226ee6e9aa |
refs/heads/master | <file_sep># witpol-auto-stu
GitHub Pages
| 78cf687fae5612743a1ecc34baf4c1a0e2c0b8fa | [
"Markdown"
] | 1 | Markdown | Czakanski/witpol-auto-stu | df4d05eadb66faf3bdf11933381129707c25d6c3 | b528cdcd4065470c37b77f2388cff7f018ee2ace |
refs/heads/main | <repo_name>4DvAnCeBoY/pyunit-selenium-sample<file_sep>/requirements.txt
ConfigParser
selenium>2.5
pytest
nose
pytest-xdist
| 3aa024e7d6a1b5e706dcce638ca8af08b78a8481 | [
"Text"
] | 1 | Text | 4DvAnCeBoY/pyunit-selenium-sample | 56c264e46f3a6d6bfa4b973d69a6c11fdd17934a | 4ac36175478fe49c9cda6f47d9767a5b83edeb9d |
refs/heads/master | <file_sep>package mail;
public class gmail {
public static void main(String args[]) {
System.out.println("login");
}
}
<file_sep>package enter;
public class enter {
public static void main(String ar[]) {
System.out.println("home");
}
}
| 5c0a8b04d59c594506f6c1b804839fd6591d4ef8 | [
"Java"
] | 2 | Java | balaganesh915/enter | 3edd1a8aa4555a3c4a42b24ac851812d2e5703d8 | 3e68f8ab07c73f79f824f3b92b77decfe382957b |
refs/heads/master | <repo_name>sivagane/CucumberExcel<file_sep>/src/test/java/org/cucmber/Excl/Utiltity.java
package org.cucmber.Excl;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import org.apache.poi.ss.formula.udf.UDFFinder;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.CellStyle;
import org.apache.poi.ss.usermodel.CreationHelper;
import org.apache.poi.ss.usermodel.DataFormat;
import org.apache.poi.ss.usermodel.Font;
import org.apache.poi.ss.usermodel.Name;
import org.apache.poi.ss.usermodel.PictureData;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.apache.poi.ss.usermodel.Row.MissingCellPolicy;
import org.openqa.selenium.WebElement;
public class Utiltity {
public List<HashMap<String, String>> read() throws IOException {
List<HashMap<String,String>>li=new LinkedList<HashMap<String,String>>();
File f=new File("C:\\Users\\jegan\\eclipse-workspace\\CucumberExcel\\Excel\\siva1.xlsx");
FileInputStream stream=new FileInputStream(f);
Workbook w=new XSSFWorkbook(stream);
Sheet s = w.getSheet("siva1");
Row headrow = s.getRow(0);
for(int i=0;i<s.getPhysicalNumberOfRows();i++) {
Row r = s.getRow(i);
HashMap<String , String>mp=new HashMap<String, String>();
for(int j=0;j<headrow.getPhysicalNumberOfCells();j++) {
Cell c = r.getCell(j);
int type = c.getCellType();
if(type==1) {
String name = c.getStringCellValue();
mp.put(headrow.getCell(j).getStringCellValue(),name);
}
else {
double d = c.getNumericCellValue();
long l=(long)d;
String name = String.valueOf(l);
mp.put(headrow.getCell(j).getStringCellValue(), name);
}
}
li.add(mp);
}
return li;
}
public void launch(String s) {
Hooks.d.get(s);
}
public void type(WebElement w,String s) {
w.sendKeys(s);
}
public void klik(WebElement w) {
w.click();
}
}
<file_sep>/src/test/resources/CucumberExcel.feature
@tag
Feature: To Check the login functionality of the Facebook Application.
@tag1
Scenario: Facebook Application
Given User launches the facebook application
When User Enters the username and password
And User Clicks the login button
Then User Navigates to Home page
<file_sep>/src/test/java/org/StepDefinition/StepDefiniton.java
package org.StepDefinition;
public class StepDefiniton {
}
| 0ff63482a31601c4e4325fa4703d448b6c40bdf6 | [
"Java",
"Gherkin"
] | 3 | Java | sivagane/CucumberExcel | b662a335c050b726b4aa969a4733f5f1a1b85405 | 29ed84b9ce48b2d84d560cdac59cfcb60123b833 |
refs/heads/master | <repo_name>nkwwb/JOS<file_sep>/README.md
# JOS
MIT-JOS project
| a5f049ae86c0c0be0855682e53a2575cf3892e4d | [
"Markdown"
] | 1 | Markdown | nkwwb/JOS | a4ee7de1f6307776d404322414c2443c412eb988 | 3bf6c6e242b9ec6cd26b42799f61aa0452994d3f |
refs/heads/main | <file_sep># Lyrics-Finder
The main screen is very simple, you just have to search for an artist and a song of this artist. Autocomplete is not implemented in this App. The search will be sent to the three API endpoints, and the result will be returned asynchronically.
| 9f4a70e76f05b421a901c34cefb310d3c31c71ae | [
"Markdown"
] | 1 | Markdown | bologrub/Lyrics-Finder | 2fb059d607b2a71acf2a0f3082d2ee77f92b5913 | 76e4fb81aa5f23d08aa27c6ef7b78fe864693147 |
refs/heads/master | <repo_name>andrianovv/chat_HTML_CSS<file_sep>/css/layout.less
/* Layout */
body{
background-color: #F5F5FF;
}
main{
display: flex;
max-width: 1160px;
margin: 0 auto;
background-color: #fff;
.set-font();
font-family: arial, sans-serif;
}
header{
padding: 20px;
}
section{
width: 60%;
	// user avatars
.user-photo{
.size-box(55px, 55px);
min-width: 55px;
border-radius: 100%;
margin-right: 5px;
background: url(../src/images/my_ava.jpg) no-repeat;
background-size: cover;
}
	// the lists that display users and their posts
.users,
.posts{
overflow: auto;
}
}
.section-left{
width: 40%;
background: @color-bg;
}
.section-right{
padding-top: 85px;
}
footer{
display: flex;
justify-content: center;
padding: 10px 30px;
}<file_sep>/css/section-left.less
/* Section left */
.section-left{
	/* block that displays the list of users */
.users{
display: flex;
flex-direction: column;
overflow: auto;
height: 690px;
li:last-of-type{
margin-bottom: 0;
}
}
.user{
display: inline-flex;
justify-content: space-between;
align-items: center;
flex-shrink: 0;
width: 100%;
height: 100px;
padding: 22.5px 20px;
&:hover{
background-color: @color-bg-hover;
}
}
.user-info{
overflow: hidden;
width: 70%;
margin-right: 5px;
}
	/* hide overflowing single-line text
	behind an ellipsis */
.user-info_name,
.user-info_post{
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
}
.user-info_name{
margin-bottom: 5px;
color: #fff;
}
.user-info_post{
.set-font(14px, @color-text);
}
.user-timeout{
min-width: 55px;
height: 25px;
border-radius: 15px;
background-color: @color-bg-dark;
.set-font(14px, @color-text);
text-align: center;
line-height: 24px;
}
}<file_sep>/css/exit.less
/* login & join CSS */
*{
box-sizing: border-box;
margin: 0;
padding: 0;
}
body{
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
height: 100vh;
}
button{
border: none;
background: none;
cursor: pointer;
}
a{
text-decoration: none;
}
// login & registration page
.wrapper-login{
width: 300px;
nav{
display: flex;
width: 100%;
height: 50px;
a{
display: block;
width: 50%;
height: 50px;
padding-top: 7px;
border-top-left-radius: 7px;
outline: none;
background-color: #3B3E99;
font-size: 22px;
color: #fff;
text-align: center;
cursor: pointer;
}
.register{
border: 2px solid #fff;
border-top-left-radius: 0;
border-top-right-radius: 7px;
background-color: #4B4EA4;
}
}
form{
padding: 15px 20px 25px 20px;
border-bottom-left-radius: 7px;
border-bottom-right-radius: 7px;
background-color: #3B3E99;
h2{
padding-bottom: 5px;
color: #fff;
text-align: center;
}
input,
button{
width: 100%;
height: 40px;
margin-top: 20px;
border: none;
border-radius: 6px;
}
input{
padding-left: 12px;
font-size: 15px;
}
button{
margin-top: 25px;
background-color: #BCBCE1;
font-size: 20px;
color: #fff;
cursor: pointer;
}
}
}
// registration page
.wrapper-register{
nav{
a{
border-top-left-radius: 0;
border-top-right-radius: 7px;
}
.login{
border: 2px solid #fff;
border-top-left-radius: 7px;
border-top-right-radius: 0;
background-color: #4B4EA4;
}
}
}<file_sep>/css/header.less
/* Header */
/* section-left header */
header{
form{
display: flex;
justify-content: space-between;
align-items: center;
}
	// search input with its user-search button
.search{
display: flex;
> *{
background-color: @color-bg-input;
}
}
	// user search input field
input[type=text]{
.size-box(125px, 35px);
padding-left: 10px;
border: none;
border-radius: 15px 0 0 15px;
color: @color-text-input;
}
	// user search submit button
label[for=btn-search]{
.size-box(40px, 35px);
padding-top: 5px;
padding-left: 9px;
border-radius: 0 15px 15px 0;
cursor: pointer;
i{
.set-font(22px, #fff);
}
}
}
<file_sep>/css/btn.less
/* Button */
button{
.size-box(45px, 45px);
color: #fff;
}
header>button{
line-height: 33px;
}
// button that shows/hides the section-left block
.btn-show{
padding-right: 3px;
i{
font-size: 37px;
}
.fa-angle-right{
margin-left: 6px;
}
}
// menu button
.btn-menu{
font-size: 25px;
}
// footer buttons
.btn-smiley,
.btn-download{
.set-font(22px, @color-btn-smiley);
cursor: pointer;
}
// download button
.btn-download{
margin-left: 10%;
}<file_sep>/css/mixins&other.less
/* Mixins & variables */
// sets a block's width and height
.size-box(@width-box: 0px, @height-box: 0px){
width: @width-box;
height: @height-box;
}
// sets the font size and color
.set-font(@size-font:1rem, @color-font: #000){
font-size: @size-font;
color: @color-font;
}
// horizontally centers an absolutely positioned element
.set-aligment(@left-margin){
position: absolute;
left: 50%;
margin-left: -@left-margin;
}
// Colors
// background colors
@color-bg: #3B3E99;
@color-bg-dark: #393C8E;
@color-bg-hover: #4B4EA4;
@color-bg-input: #EDEDFA;
@color-bg-post_odd: #989AD7;
@color-bg-post_even: #F1F1FB;
// button colors
@color-btn: #E1E1FC;
@color-btn-smiley: #DADAE6;
@color-btn-submit: #BCBCE1;
// text colors
@color-text: #6F71B4;
@color-text-input: #ACACB7;<file_sep>/js/main.js
$(function(){
var showFriends = (function(){
var btnShow = $('.btn-show');
		// when the .btn-show button is clicked
btnShow.click(function(){
var btnShowI = $('.btn-show>i');
			// toggle the arrow between left and right
btnShowI.toggleClass('fa-angle-left').toggleClass('fa-angle-right');
});
})();
}) // end ready<file_sep>/css/footer.less
/* Footer */
/* section-right footer */
footer{
.post-new{
position: relative;
width: 75%;
}
	// new-message input area
textarea{
.size-box(90%, 70px);
border: none;
border-radius: 25px 0 0 25px;
padding: 5px 25px 5px 25px;
background-color: @color-bg-input;
font-size: 16px;
font-family: arial;
color: @color-text-input;
}
	// emoji insert button
label[for=footer_label]{
position: absolute;
right: 0;
.size-box(20%, 70px);
max-width: 65px;
padding-top: 22.5px;
border-radius: 0 25px 25px 0;
background-color: @color-bg-input;
}
}<file_sep>/css/style.less
/* Main CSS */
// functions and variables
@import 'mixins&other.less';
// CSS reset and shared tag styles
@import 'base.less';
// structural tags
@import 'layout.less';
// header styles
@import 'header.less';
// left section styles
@import 'section-left.less';
// right section styles
@import 'section-right.less';
// footer styles
@import 'footer.less';
// button styles
@import 'btn.less';<file_sep>/css/section-right.less
/* Section right */
.section-right{
	/* the users' posts feed */
.posts{
display: flex;
flex-direction: column;
height: 600px;
padding-right: 30px;
padding-left: 30px;
		// container for a single post
.post{
display: flex;
justify-content: flex-end;
align-items: flex-end;
flex-wrap: wrap;
max-width: 400px;
span{
.set-font(13px, @color-text);
margin-top: 15px;
margin-bottom: 35px;
}
}
		// selects the odd-numbered posts
>li:nth-of-type(odd){
.post-text{
margin-left: 15px;
background-color: @color-bg-post_odd;
color: #fff;
}
span{
margin-right: 10px;
}
}
		// selects the even-numbered posts
>li:nth-of-type(even){
flex-direction: row-reverse;
align-self: flex-end;
.post-text{
margin-right: 15px;
background-color: @color-bg-post_even;
color: #000;
}
span{
margin-left: 10px;
}
}
}
	// user avatar
.user-photo{
margin-right: 0;
}
	// a user's post bubble
.post-text{
word-wrap:break-word;
max-width: 330px;
padding: 20px 25px;
border-radius: 15px;
}
} | d0929530faccd5ebbdc1f34a0952a2f9920c82fc | [
"JavaScript",
"Less"
] | 10 | JavaScript | andrianovv/chat_HTML_CSS | 00e1db854a54f6caaf40e299587963d66548cabe | fc4906396b708205a9fa128a31793829553ed93f |
refs/heads/master | <repo_name>olafurpg/dot-hot-reload<file_sep>/Makefile
dot:
dot -Tpdf graph.dot -o graph.pdf
<file_sep>/dot-hot-reload.sh
ls *.dot | entr make dot
| 3d30e54506df2b7234979cb31b0c5c6734f44249 | [
"Makefile",
"Shell"
] | 2 | Makefile | olafurpg/dot-hot-reload | c6037cdc1be38ce1fa1134c2c9653b1cfc766d1d | 5ee71998e23c27f2e30b93a9de2c8fe3740abf93 |
refs/heads/master | <repo_name>Ghazikhan/Demoapp<file_sep>/app/views/userinfos/index.html.erb
<p id="notice"><%= notice %></p>
<h1>Listing Userinfos</h1>
<table>
<thead>
<tr>
<th>User</th>
<th>Date of birth</th>
<th>Gender</th>
<th>Mobile</th>
<th>Address</th>
<th colspan="3"></th>
</tr>
</thead>
<tbody>
<% @userinfos.each do |userinfo| %>
<tr>
<td><%= userinfo.user %></td>
<td><%= userinfo.date_of_birth %></td>
<td><%= userinfo.gender %></td>
<td><%= userinfo.mobile %></td>
<td><%= userinfo.address %></td>
<td><%= link_to 'Show', userinfo %></td>
<td><%= link_to 'Edit', edit_userinfo_path(userinfo) %></td>
<td><%= link_to 'Destroy', userinfo, method: :delete, data: { confirm: 'Are you sure?' } %></td>
</tr>
<% end %>
</tbody>
</table>
<br>
<%= link_to 'New Userinfo', new_userinfo_path %>
<file_sep>/app/views/shared/_leftsidebar.html.erb
<aside id="sidebar"><!-- sidebar -->
<h3>Sponsors</h3>
<%= image_tag("ad180.png") %><br /><br />
<h3>Connect With Us</h3>
<%= link_to "Facebook", ('https://www.facebook.com/ghazi.khan.965'), class: "fb-btn2" %><br><br>
<%= link_to "Twitter", ('https://twitter.com/?lang=en'), class: "tw-btn2" %><br><br>
<%= link_to "Google Plus", ('https://plus.google.com/u/0/108681172820250920941/posts'), class: "gp-btn2" %><br><br>
</aside><!-- end of sidebar -->
<file_sep>/app/views/layouts/application.html.erb~
<!DOCTYPE html>
<html>
<head>
<title>Demoapp</title>
<%= stylesheet_link_tag 'application', media: 'all', 'data-turbolinks-track' => true %>
<%= javascript_include_tag 'application', 'data-turbolinks-track' => true %>
<%= csrf_meta_tags %>
</head>
<body>
<div class="container">
<div class="row">
<div class="col-sm-4" ></div>
<div class="col-sm-4" ></div>
<div class="col-sm-4" ></div>
</div>
<div class="container-fluid" style="background-color: #d0e4fe; max-width:1500px">
<%= render 'shared/header' %>
<div class="row">
<div class="col-md-6 col-md-offset-3">
<section class="ads728x90">
<h3> Ads 728x90 </h3>
</section>
</div>
</div>
<div class="row">
<div class="col-md-3">
<section class="leftbar">
<!--<%= render 'shared/leftsidebar' %>-->
</section>
</div>
<div class="col-md-6">
<%= yield %>
</div>
<div class="col-md-3">
<section class="rightbar">
<!--<%= render 'shared/rightsidebar' %>-->
</section>
</div>
</div>
<div class="row">
<div class="col-xs-4">
<h2>Twitter</h2>
<p class="text-justify">HTML is a markup language that is used for creating web pages. The HTML tutorial section will help you understand the basics of HTML, so that you can create your own web pages or website.</p>
<p><a href = "https://twitter.com/pakapply" class = "btn btn-primary btn-lg" role = "button">
Twitter
</a></p>
</div>
<div class="col-xs-4">
<h2>Facebook</h2>
<p class="text-justify">CSS is used for describing the presentation of web pages. The CSS tutorial section will help you learn the essentials of CSS, so that you can fine control the style and layout of your HTML document.</p>
<p>
<a href = "https://www.facebook.com/pakjobposting" class = "btn btn-primary btn-lg" role = "button">
Facebook
</a></p>
</div>
<div class="col-xs-4">
<h2>Google Plus</h2>
<p class="text-justify">Bootstrap is a powerful front-end framework for faster and easier web development. The Bootstrap tutorial section will help you learn the techniques of Bootstrap so that you can .</p>
<p><a href = "https://plus.google.com/u/0/108681172820250920941/posts" class = "btn btn-primary btn-lg" role = "button">
Google Plus
</a></p>
</div>
</div>
<hr>
<div id="footer">
<div class="container-fluid"style="background-color:#C0C0C0">
<p class="text-muted">Copyright ©2015 PAKAPPLY</p>
</div>
</div>
</div>
</body>
</html>
<file_sep>/app/views/userinfos/index.json.jbuilder
json.array!(@userinfos) do |userinfo|
json.extract! userinfo, :id, :user_id, :date_of_birth, :gender, :mobile, :address
json.url userinfo_url(userinfo, format: :json)
end
<file_sep>/app/controllers/work_fields_controller.rb
class WorkFieldsController < ApplicationController
before_action :set_work_field, only: [:show, :edit, :update, :destroy]
# GET /work_fields
# GET /work_fields.json
def index
@work_fields = WorkField.all
end
# GET /work_fields/1
# GET /work_fields/1.json
def show
end
# GET /work_fields/new
def new
@work_field = WorkField.new
@user = current_user
end
# GET /work_fields/1/edit
def edit
end
# POST /work_fields
# POST /work_fields.json
def create
@work_field = WorkField.new(work_field_params)
respond_to do |format|
if @work_field.save
format.html { redirect_to @work_field, notice: 'Work field was successfully created.' }
format.json { render :show, status: :created, location: @work_field }
else
format.html { render :new }
format.json { render json: @work_field.errors, status: :unprocessable_entity }
end
end
end
# PATCH/PUT /work_fields/1
# PATCH/PUT /work_fields/1.json
def update
respond_to do |format|
if @work_field.update(work_field_params)
format.html { redirect_to @work_field, notice: 'Work field was successfully updated.' }
format.json { render :show, status: :ok, location: @work_field }
else
format.html { render :edit }
format.json { render json: @work_field.errors, status: :unprocessable_entity }
end
end
end
# DELETE /work_fields/1
# DELETE /work_fields/1.json
def destroy
@work_field.destroy
respond_to do |format|
format.html { redirect_to work_fields_url, notice: 'Work field was successfully destroyed.' }
format.json { head :no_content }
end
end
private
# Use callbacks to share common setup or constraints between actions.
def set_work_field
@work_field = WorkField.find(params[:id])
end
# Never trust parameters from the scary internet, only allow the white list through.
def work_field_params
params.require(:work_field).permit(:field_name)
end
end
<file_sep>/app/controllers/userinfos_controller.rb
class UserinfosController < ApplicationController
before_action :set_userinfo, only: [:show, :edit, :update, :destroy]
def index
@userinfos = Userinfo.all
end
def show
end
def new
@userinfo = Userinfo.new
end
def edit
end
def create
@user = User.find(params[:user_id])
@userinfo = @user.userinfos.create(userinfo_params)
if @userinfo.save
redirect_to @user, notice: 'User profile was successfully created.'
else
flash[:alert] = "All fields are required...!"
redirect_to @user
end
end
def update
respond_to do |format|
if @userinfo.update(userinfo_params)
format.html { redirect_to @userinfo, notice: 'Userinfo was successfully updated.' }
format.json { render :show, status: :ok, location: @userinfo }
else
format.html { render :edit }
format.json { render json: @userinfo.errors, status: :unprocessable_entity }
end
end
end
def destroy
@userinfo.destroy
respond_to do |format|
format.html { redirect_to userinfos_url, notice: 'Userinfo was successfully destroyed.' }
format.json { head :no_content }
end
end
private
def set_userinfo
@userinfo = Userinfo.find(params[:id])
end
def userinfo_params
params.require(:userinfo).permit(:user_id, :date_of_birth, :gender, :mobile, :address)
end
end
<file_sep>/app/views/shared/_rightsidebar.html.erb
<h3>Right Sidebar</h3>
<p>
The most useful web for the jobless indivudals,
through this web every one can apply for suitable post, we provide apportunity
for bright future, search your desire post which announced by the government/
none Govt organizations. You may also find here rights for your articles/
comments/categories. We post for you most newest news,jobs, social activities,
ideas and career advantage. Our team care for you respect for your ideas and we
also awaiting for your kind suggestions/feedback.</p>
<file_sep>/app/models/work_field.rb
class WorkField < ActiveRecord::Base
has_many :experiences
end
<file_sep>/app/views/shared/_userinfo.html.erb
<% @user.userinfos.each do |info| %>
<p>
<strong>Date of birth:</strong>
<%= info.date_of_birth.strftime("%B %d, %Y") %>
</p>
<p>
<strong>Gender:</strong>
<%= info.gender %>
</p>
<p>
<strong>Mobile:</strong>
<%= info.mobile %>
</p>
<p>
<strong>Address:</strong>
<%= info.address %>
</p>
<% end %>
<hr>
<file_sep>/app/views/users/show.html.erb
<br>
<% if alert %>
<p class="alert alert-danger"><%= alert %></p>
<% end %>
<% if notice %>
<p class="alert alert-success"><%= notice %></p>
<% end %>
<p><strong>Name:</strong> <%= @user.first_name %> <%= @user.last_name %></p>
<p><strong>Email:</strong> <%= mail_to @user.email %></p>
<% if @user.userinfos.exists? %>
<%= render 'shared/userinfo' %>
<% if @user.educations.exists? %>
<%= render 'shared/education' %>
<% end %>
<%= render 'educations/form' %>
<% if @user.experiences.exists? %>
<%= render 'shared/experience' %>
<% end %>
<%= render 'experiences/form' %>
<% else %>
<%= render 'userinfos/form' %>
<% end %>
<file_sep>/app/models/city.rb
class City < ActiveRecord::Base
has_many :institutes
end
<file_sep>/app/views/userinfos/show.html.erb
<p id="notice"><%= notice %></p>
<p>
<strong>User:</strong>
<%= @userinfo.user %>
</p>
<p>
<strong>Date of birth:</strong>
<%= @userinfo.date_of_birth %>
</p>
<p>
<strong>Gender:</strong>
<%= @userinfo.gender %>
</p>
<p>
<strong>Mobile:</strong>
<%= @userinfo.mobile %>
</p>
<p>
<strong>Address:</strong>
<%= @userinfo.address %>
</p>
<%= link_to 'Edit', edit_userinfo_path(@userinfo) %> |
<%= link_to 'Back', userinfos_path %>
<file_sep>/app/controllers/degres_controller.rb
class DegresController < ApplicationController
before_action :set_degre, only: [:show, :edit, :update, :destroy]
# GET /degres
# GET /degres.json
def index
@degres = Degre.all
end
# GET /degres/1
# GET /degres/1.json
def show
end
# GET /degres/new
def new
@degre = Degre.new
end
# GET /degres/1/edit
def edit
end
# POST /degres
# POST /degres.json
def create
@degre = Degre.new(degre_params)
respond_to do |format|
if @degre.save
format.html { redirect_to @degre, notice: 'Degre was successfully created.' }
format.json { render :show, status: :created, location: @degre }
else
format.html { render :new }
format.json { render json: @degre.errors, status: :unprocessable_entity }
end
end
end
# PATCH/PUT /degres/1
# PATCH/PUT /degres/1.json
def update
respond_to do |format|
if @degre.update(degre_params)
format.html { redirect_to @degre, notice: 'Degre was successfully updated.' }
format.json { render :show, status: :ok, location: @degre }
else
format.html { render :edit }
format.json { render json: @degre.errors, status: :unprocessable_entity }
end
end
end
# DELETE /degres/1
# DELETE /degres/1.json
def destroy
@degre.destroy
respond_to do |format|
format.html { redirect_to degres_url, notice: 'Degre was successfully destroyed.' }
format.json { head :no_content }
end
end
private
# Use callbacks to share common setup or constraints between actions.
def set_degre
@degre = Degre.find(params[:id])
end
# Never trust parameters from the scary internet, only allow the white list through.
def degre_params
params.require(:degre).permit(:name, :drege_type)
end
end
<file_sep>/README.md
# Demoapp
# Demoapp1
| 279acf9258f7c8fa47b973b7d8959c897d7a0d4c | [
"HTML+ERB",
"Markdown",
"Ruby"
] | 14 | HTML+ERB | Ghazikhan/Demoapp | fee5d5c9e6dfa89a8edf77b1e37ee7e5282f5e63 | 9958b607e0f4cdaacde815a6176cec4ada44c322 |
refs/heads/master | <file_sep><!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Página Web <NAME></title>
</head>
<style>
body {
background-color: rgb(255, 224, 186);
color: rgb(158, 84, 76);
}
p {
color: rgb(0, 0, 0);
font-size: 15px;
font-family: monospace;
}
#p1 {text-align: center;
font-size: 2em;
font-family: Helvetica;
font-weight: bold;
text-decoration: underline;
}
#p2 {text-align: center;
font-family: Helvetica;
font-size: 2em;
font-weight: bold;
text-decoration: underline;
}
#p3 {text-align: center;
font-family: Helvetica;
font-size: 2em;
font-weight: bold;
text-decoration: underline;
}
#cts {color: rgb(158, 84, 76);
font-family: fantasy;
}
#tit1 {
text-align: center;
}
#tit11 {
text-align: center;
font-size: 13px;
background-color: rgb(255, 255, 255);
}
#indice {
background-color: rgb(255, 255, 255);
border: 3px dashed rgb(179, 196, 151);
margin:15px 15px 15px 15px;
}
#subt {
color: rgb(82, 96, 204);
font-style: italic;
}
#tabla {
font-family: Helvetica, sans-serif;
color: rgb(0, 0, 0);
background-color: rgb(255, 255, 255);
}
#ol1 {
color: rgb(0, 0, 0,)
}
#p {
color: rgb(0, 0, 0);
font-size: 15px;
font-family: monospace;
}
#imagen1 {
margin-left: 425px;
}
</style>
<body>
<h1 id="tit1">Curso de TICS 1º de bachillerato</h1>
<h1 id="tit11">Por <NAME>, 1ºA de bachillerato</h1><br>
<img align="center" src="https://fotos.subefotos.com/5fb6f2a5400c05c00d21e4c600153a87o.jpg"><br>
<h2 id="cts">ÍNDICE</h2>
<ul id="indice">
<li><a href="#p1">Primer trimestre</a></li>
<li><a href="#p2">Segundo trimestre</a></li>
<li><a href="#p3">Tercer trimestre</a></li>
</ul>
<h2 id="p1">PRIMER TRIMESTRE</h2>
<h3 id="subt">¿Qué son las TICS?</h3>
<p>
En primer lugar nos hicimos una idea de que eran las TICS <em><b>(tecnologías de la información y la comunicación)</b></em> y su espectacular expansión en estos tiempos, las bases de su gran desarrollo son:<br>
- La evolución de los ordenadores<br>
- La irrupción de los ordenadores en todos los ámbitos<br>
- La utilización de la tecnología multimedia<br>
- La aparición del módem<br>
- El avance de las telecomunicaciones<br>
- La proliferación de Internet<br>
- La aparición de las nuevas tecnologías para teléfonos móviles<br>
<img id="imagen1" align="center" width="400" src="https://dev-res.thumbr.io/libraries/44/47/73/lib/1562634122031_1.png?size=854x493s&ext=jpg"><br>
</p>
<h3 id="subt">Glosario de conceptos</h3>
<p>
Tambien definimos conceptos relacionados con las TICS, los cuales son:
<table id="tabla">
<thead>
<tr>
<th>Inteligencia artificial</th>
</tr>
</thead>
<tbody>
<tr>
<td>Es la disciplina científica que se ocupa de crear programas informáticos que
ejecutan operaciones que antes era realizada por humanos.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Nanotecnología</th>
</tr>
</thead>
<tbody>
<tr>
<td>Es la tecnología que se dedica al diseño y manipulación de materia a nivel de átomos o moléculas,
con fines industriales o médicos entre otros.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Domótica</th>
</tr>
</thead>
<tbody>
<tr>
<td>Son los sistemas capaces de automatizar una vivienda o edificación de cualquier tipo, aportando servicios de gestión energética,
seguridada, bienestar y comunicación. Pueden ser cableadas e inalámbricas.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Reconocimiento biométrico</th>
</tr>
</thead>
<tbody>
<tr>
<td>La biometría es una tecnología de identificación basada en el reconocimiento de una característica física e intransferible de las personas,
como por ejemplo, la huella digital, el reconocimiento del patrón venoso del dedo o el reconocimiento facial.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Biotecnología</th>
</tr>
</thead>
<tbody>
<tr>
<td>La biotecnología se refiere a toda aplicación tecnológica que utilice sistemas biológicos y organismos vivos o sus derivados para la
creación o modificación de productos o procesos para usos específicos.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Nube o cloud</th>
</tr>
</thead>
<tbody>
<tr>
<td>La nube es un lugar de trabajo público o privado. En la nube, puedes subir archivos y trabajar en proyectos conjuntos
mediante el uso de internet para coordinar el trabajo de distintos usuarios. </td>
</tr>
</tbody>
<thead>
<tr>
<th>GIMPS (número mayor primo)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Great Internet Marcene Prime Search es un proyecto de computación distribuida que utiliza los
programas gratuitos Prime95 y MPrime con el fin de buscar números primos de Mersenne.
<NAME> fundó el proyecto y ha escrito los programas que se encargan de analizar números de Mersenne.<br>
A fecha de 7 de Diciembre de 2018 el número primo más largo es el: 282.589.933 − 1.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Geolocalización</th>
</tr>
</thead>
<tbody>
<tr>
<td>Es la capacidad para obtener la ubicación geográfica real de un objeto, como un radar, un teléfono móvil o un ordenador conectado a Internet,
puede referirse a la consulta de la ubicación, o bien para la consulta real de la ubicación.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Redes sociales (RRSS)</th>
</tr>
</thead>
<tbody>
<tr>
<td>El inicio de las redes sociales en internet se remonta a 1995, cuando <NAME> crea el sitio web classmates.com. Con esta red social se
pretende que la gente pueda recuperar o mantener el contacto con antiguos compañeros del colegio, instituto, universidad, etcétera. <br>
Las redes sociales más utilizadas son Facebook, Instagram y Twitter entre otras con más de 1.000 millones de usuarios activos en
Instagram según cuentas oficiales. <br></td>
</tr>
</tbody>
<thead>
<tr>
<th>Hacker</th>
</tr>
</thead>
<tbody>
<tr>
<td>El término hacker alude a un experto en informática. El concepto tiene dos grandes acepciones ya que puede referirse a
un pirata informático (una persona que accede de manera ilegal a un sistema para tomar su control u obtener datos privados) o a un experto que se
encarga de proteger y mejorar la seguridad informática.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Criptomoneda</th>
</tr>
</thead>
<tbody>
<tr>
<td> La criptomoneda nació como un subproducto de una invención, Bitcoin, que buscaba ser una forma de intercambio de efectivo electrónico.
Las criptomonedas en sí, son datos en una base de datos que necesitan condiciones muy específicas para ser cambiadas.
Se cambian con el minado de criptomonedas. </td>
</tr>
</tbody>
<thead>
<tr>
<th>Criptografía</th>
</tr>
</thead>
<tbody>
<tr>
<td>Es la técnica que protege documentos y datos. Funciona a través de la utilización de cifras o códigos para escribir algo
secreto en documentos y datos confidenciales. Las claves criptográficas pueden ser:
<ol>
<li><b>Simétricas:</b> Es la utilización de algoritmos para descifrar, encriptar y ocultar documentos.</li>
<li><b>Asimétricas:</b>Hay dos tipos de llaves. Públicas y privadas. Las públicas son aquellas que son accesibles para cualquier persona,
y las privadas son aquellas que solo la persona que lo recibe es capaz de descifrar.</li>
</ol></td>
</tr>
</tbody>
<thead>
<tr>
<th>Redes Neuronales</th>
</tr>
</thead>
<tbody>
<tr>
<td>Una red neuronal es un modelo computacional vagamente inspirado en el comportamiento observado en su homólogo biológico.
Consiste en un conjunto de unidades, llamadas neuronas artificiales, conectadas entre sí para transmitirse señales.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Nanotecnología</th>
</tr>
</thead>
<tbody>
<tr>
<td>Tecnología que se dedica al diseño y manipulación de la materia a nivel de átomos o moléculas,
con fines industriales o médicos, entre otros. </td>
</tr>
</tbody>
<thead>
<tr>
<th>Big Data</th>
</tr>
</thead>
<tbody>
<tr>
<td>Big Data o también llamado Macrodatos es un término evolutivo que describe la cantidad voluminosa de datos estructurados,
semiestructurados y no estructurados que tienen el potencial de ser extraídos para obtener información.
Son unos datos tan sumamente grandes y complejos que no lo pueden procesar las aplicaciones informáticas tradicionales. </td>
</tr>
</tbody>
<thead>
<tr>
<th>Cookies</th>
</tr>
</thead>
<tbody>
<tr>
<td>Las cookies son pequeños archivos que algunos sitios web guardan en tu ordenador. Almacenan información sobre ti,
como nombre de usuario o información de registro, o preferencias de usuario.<br>
Se guardan para ser usadas en los anuncios. Estudian tus intereses y te enseñan anuncios relacionados con lo que te gusta.</td>
</tr>
</tbody>
<thead>
<tr>
<th>Cibernética</th>
</tr>
</thead>
<tbody>
<tr>
<td>La cibernética es la ciencia que estudia los sistemas de comunicación y de regulación automática de los seres
vivos y los aplica a sistemas electrónicos y mecánicos que se parecen a ellos.</td>
</tr>
</tbody>
<thead>
<tr>
<th>App</th>
</tr>
</thead>
<tbody>
<tr>
<td>Una aplicación es simplemente un programa informático creado para llevar a cabo o
facilitar una tarea en un dispositivo informático. Cabe destacar que aunque todas las aplicaciones son programas,
no todos los programas son aplicaciones.</td>
</tr>
</tbody>
</table>
</p>
<h3 id="subt">Evolución de las TICS</h3>
<p> Hemos visto como han ido evolucionando las TICS en sus diferentes ámbitos:
<ol id="p">
<li><b>Proliferación de internet:</b><br>
<ul> <br><li>Internet se creó en 1969 y revolucionó el mundo</li><br>
<li>En 1971, <NAME> envió el primer email</li><br>
<li>Dos años más tarde, la palabra internet se usó por primera vez en una transmisión de control de protocolo.</li><br>
<li>1982 fue la fecha que marcó un antes y un después por el gran auge que supusieron los emoticonos</li> <br>
<li>Yahoo se funda en 1994 y justo al año siguiente Microsoft lanza Internet Explorer. </li> <br>
<li>Un año muy importante en la historia de internet es 1998 por dos motivos. En primer lugar nace Google
(con el tiempo acabaría siendo conocido como el gigante de las búsquedas) y el número de usuario de internet alcanza un millón
(cifra que ahora parece irrisoria). </li><br>
<li>En 2001 aparece la mayor enciclopedia colectiva, Wikipedia. </li><br>
<li>Entre el 2003 y 2005 se dan varias innovaciones gracias a la aparición de Safari, MySpace, LinkedIn, Skype y WordPress. </li><br>
<li>En 2004 aparece Facebook, Gmail, Flickr y Vimeo. Sin embargo, Youtube tuvo que esperar a 2005 para ver la luz. </li><br>
<li>Chrome de Google nace en 2008 y dos años después nace Instagram, aunque sólo disponible para Apple.
Pinterest, que nace en 2010 consigue 10 millones de usuarios más rápido que las otras redes.
Por último, 2012 sirve para que internet alcance los 2,4 mil millones de internautas. </li><br>
</ul></li>
<br>
<li><b>Evolución de los ordenadores:</b>
<ul>
<li>El primer sistema informático programable se creó en el año 1936 por <NAME> y lo llamó Z1<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/e/e5/Zuse_Z1-2.jpg"><br>
</li>
<li>La primera compañía en vender ordenadores fue ABC computers <br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/0/01/Atanasoff-Berry_Computer_at_Durhum_Center.jpg"><br>
</li>
<br>
<li>10 años después nació el transistor que se convertiría en una parte esencial del ordenador.
El primer ordenador con transistor se llamaba ENIAC1<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/3/3b/Two_women_operating_ENIAC.gif"><br>
</li>
<br>
<li>En 1953 la empresa IBM sacaría al mercado el ordenador IBM 701EDMP.<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/thumb/b/b0/IBM_701_frame.jpg/220px-IBM_701_frame.jpg"><br>
</li>
<br>
<li>En 1954 se desarrolló el primer lenguaje de programación llamado Fortran.<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/thumb/5/58/FortranCardPROJ039.agr.jpg/1280px-FortranCardPROJ039.agr.jpg"><br>
</li>
<br>
<li>En 1958 se creó el circuito integrado, también llamado chip.<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/thumb/5/5c/Microchips.jpg/1200px-Microchips.jpg"><br>
</li>
<br>
<li>En los años 60 salieron algunos ordenadores importantes como: Scelbi, Mark-8 Altair,IBM5100,
Apple 1 y 2, TRS-80 y el commodere pet.<br>
<img width="400" src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUSExIWFhUXFxgYFxcYGB0YFxcXGBgXFxcYFRcYHSggGBolHRUXITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGxAQGzUlHyUtLS0tLS0wLi4tLS0vLi0tLS0tLS0vLS0tLS0tLS0tLS0tLS0<KEY>"><br>
</li>
<br>
<li>En 1979 salió a la venta WordStar, el primer procesador de texto <br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/en/e/e3/Wordstar_Screenshot.png"><br>
</li>
<br>
<li>En 1981 Microsoft saca a la venta el sistema operativa MS-DOS.<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/7/79/IBM_PC_DOS_1.0_screenshot.jpg"><br>
</li>
<br>
<li>En 1983 el ordenador Apple Lisa fue el primero que contenía interfaz gráfica.<br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/7/7d/Apple-LISA-Macintosh-XL.jpg"><br>
</li>
<br>
<li>En 1985 Microsoft presenta el sistema operativo Windows 1.0 <br>
<img width="400" src="https://upload.wikimedia.org/wikipedia/commons/d/df/Microsoft_Windows_1.0_screenshot.png"><br>
</li>
<br>
<li>En 1990 se crea el World Wide web (www).<br>
<img width="400" src="https://www.elheraldo.co/sites/default/files/styles/width_860/public/articulo/2016/08/24/25_anos_www_0.jpg?itok=iWxEZOZi"><br>
</li>
<br>
<li>En 1991 <NAME> comenzó a desarrollar el sistema operativo Linux <br>
<img width="400" src="https://www.muycanal.com/wp-content/uploads/2014/03/PCLinux.jpg"><br>
</li>
<br>
<li>En 2001 se lanza el sistema operativo Windows XP por Mircosoft.<br>
<img width="400" src="https://s1.eestatic.com/2019/08/27/omicrono/Omicrono_424718385_133129821_1706x960.jpg"><br>
</li>
<br>
<li>En 2005 se crea youtube <br>
<img width="400" src="https://www.webdesignmuseum.org/uploaded/timeline/youtube/youtube-2005.png"><br>
</li>
<br>
<li>En 2006 lanzamiento de el sistema operativo de Microsoft Windows vista <br>
<img width="400" src="https://winphonemetro.com/files/2017/02/microsoft-to-kill-off-windows-vista-in-less-than-60-days-512956-2-700x383.jpg"><br>
</li>
</ul></li>
<br>
<li><b>Aparición del módem:</b><br>
La historia de internet comienza en 1958, la compañía BELL creaba el primer modem capaz de transmitir datos binarios sobre una
línea telefónica simple. Con aquel dispositivo se daba el primer paso para que más tarde, en 1967 se comenzase a
hablar de ARPANET (una red de computadoras creada por el Departamento de Defensa de los Estados Unidos para utilizarla como
medio de comunicación entre las diferentes instituciones.)
</li>
<br>
<li><b>Evolución de los teléfonos móviles</b><br>
<ul>
<li><em><b>Primera generación:</b></em><br>
En 1973, <NAME> realiza la primera llamada a través del móvil de modelo Motorola DynaTAC 8000X en Nueva York.
11 años más tarde, comenzó a comercializarse. <br>
Pero este teléfono tenía unas desventajas notables: pesaba 1,1 kilogramos y aunque con este prototipo se disponía de 30
minutos de conversación, requería 10 horas de carga.<br>
<img width="200" src="https://static.turbosquid.com/Preview/2015/10/21__16_08_04/Motorola_DynaTac_00.jpg9ce0e652-7651-419c-9314-08173a8bc306Zoom.jpg">
</li>
<li><em><b>Segunda generación:</b></em><br>
Entre 1990 y 1995 se produjo un cambio drástico en el diseño y la portabilidad. Los dispositivos móviles empezaron a aparecer
poco a poco en manos del consumidor medio por primera vez. Esto hizo que se produjese un interés repentino por la tecnología
por lo que, en diciembre de 1992, se envió el primer mensaje SMS, enviado por la red GSM de Vodafone en el que se leía:
<b>Merry_Christmas.</b><br> En 1997 llegó el modelo Siemens S10, el primer teléfono en ofrecer una pantalla a color.
Sin embargo, solo tenía cuatro colores y solo el texto estaba en color.<br>
<img width="200" src="https://i.blogs.es/6fa782/2012_02_15_siemens1/1366_2000.jpg">
</li>
<li><em><b>Tercera generación:</b></em><br>
Con el 2G, se pudo simplificar la fabricación de teléfonos, reducir su tamaño y tenía un coste más barato, esto consiguió que
comenzaran a comercializara para el público general. El primer contenido descargable vendido a teléfonos móviles fue el tono
de llamada, que sentó las bases de toda una industria. Con la llegada de la generación de transición al 3G, se introdujeron
cambios importantes en el campo de la telefonía, lo que aumentó los servicios ofrecidos por 2G. Fue entonces cuando Nokia lanzó
el Nokia 8110.<br>
<img width="200" src="https://mobileunlock24.com/5945-large_default/desbloquear-nokia-8110-4g.jpg">
</li>
<li><em><b>Cuarta generación:</b></em><br>
En 1999, los BlackBerry triunfaron, se consideraban la mejor herramienta para los hombres de negocios, que podías responder a
correos electrónicos desde le móvil. <br> A finales de los 90, se dio lugar a un crecimiento exponencial de la industria móvil.
Los dispositivos móviles se estaban convirtiendo rápidamente en la norma.<br>
<img width="200" src="https://images-na.ssl-images-amazon.com/images/I/81tnErm2w6L._AC_SX355_.jpg"><br>
Apareció la generación del 3G, que introdujo el uso de los datos, permitiendo el desarrollo de las comunicaciones móviles a través de la red GSM.
Está red fue la que sentó las bases para que el usuario pudiera conectarse a Internet e interactuar desde sus dispositivos de otra manera.<br>
En 2007, <NAME>, presentó el iPhone. Combinando las características de un teléfono móvil, un ordenador y un reproductor multimedia, se revolucionó todo.
iOS y Android popularizaron las aplicaciones con millones de consumidores, logrando que las interfaces con pantalla táctil fueran lo común.<br>
El 3G dio como resultado un aumento de la capacidad de transmisión y recepción de datos, además de obtener un mayor nivel de seguridad en las comunicaciones,
fue posible tener la capacidad de conectarse a Internet.<br>
<img width="200" src="https://t.ipadizate.es/2018/03/iphone-2g-2.jpg"><br>
</li>
<li><em><b>Quinta generación:</b></em><br>
En 2009 se anunció públicamente la conexión 4G. En definitiva, esta nueva tecnología permitía alcanzar velocidades, como mínimo,
10 veces más rápidas que las del 3G.<br>
Desde la cuarta generación, los teléfonos han ido evolucionando, incluyendo 3 lentes para la cámara, sensores faciales y de huella dactilar,
procesadores cada vez más potentes, etc.<br>
Actualmente, teléfonos como el iPhone X, han revolucionado todo lo que conocíamos de los teléfonos, al igual que la red de 5G lanzado este último año,
que se comenzará a utilizar cada vez mas.<br>
<img width="200" src="https://www.worten.es/i/f5633bd5416787813fd7a9a649cc4bc265c22556.jpg"><br>
</li>
</ul>
</li>
</ol>
</p>
<h3 id="subt">Falacias en internet</h3>
<p>
En esta evaluación tambien hemos podido comprobar lo fácil que es introducir contenido falso en internet y difundirlo. En mi caso hice una noticia falsa junto a mis compañeros, que era
sobre el fichaje de Neymar Jr por el Athletic Club de Bilbao. Cogimos la wikipedia de Neymar, un twit de el Athletic de Bilbao y una foto de Neymar y de un jugador del Athletic. Luego lo
editamos todo con Photoshop para que pareciera verídico. (la imágen está editada por mi)<br>
<img width="500" src="https://fotos.subefotos.com/584fb493e89c8b89493c94d7b4920d81o.png"><br>
</p>
<br>
<br>
<h2 id="p2">SEGUNDO TRIMESTRE</h2>
<h3 id="subt">Modelo OSI</h3>
<img id="imagen1" src="https://fotos.subefotos.com/b2d37babd215f87cbc06fb708e19465fo.png"><br>
<p> Al principio de la segunda evaluación estuvimos viendo el modelo OSI y sus siete capas, en mi caso tuve que hacer junto a dos compañeros un trabajo sobre
la capa física.
<ol id="p">
<li><b>CAPA FÍSICA</b><br>Se encarga de las conexiones físicas de la computadora hacia la red, en lo que se refiere al
medio físico, características del medio y la forma en la que se transmite la información
</li><br>
<li><b>CAPA DE ENLACE</b><br>Responsable de la transferencia fiable de información a través de un circuito de transmisión de datos.
Recibe peticiones de la capa de red y utiliza los servicios de la capa física.<br> El objetivo de la capa de enlace es conseguir que la información fluya, libre de errores,
entre dos máquinas que estén conectadas directamente.
</li><br>
<li><b>CAPA DE RED</b><br> Se encarga de buscar un camino para que la información llegue de un ordenador a otro, para esto necesita:
<ul>
<li>Conocer la topología de red de comunicaciones.
</li>
<li>Disponer de algoritmos para escoger las trayectorias adecuadas.
</li>
<li>Ser capaz de balancear la carga entre diferentes nodos.
</li>
<li>Poder gestionar las diferencias entre distintas redes.
</li>
</ul>
</li><br>
<li><b>CAPA DE TRANSPORTE</b><br>La capa transporte es el cuarto nivel del modelo OSI encargado de la transferencia libre de errores
de los datos entre el emisor y el receptor, aunque no estén directamente conectados, así como de mantener el flujo de la red. <br>
Esta capa está estrechamente relacionada con la capa de red, y muchos de sus protocolos están adaptados a los protocolos de dicha capa.<br>
La capa de transporte permite establecer una comunicación fiable utilizando redes que no lo son.
</li><br>
<li><b>CAPA DE SESIÓN</b><br>La capa de sesión es el quinto nivel del modelo OSI,1 que proporciona los mecanismos para controlar el diálogo entre las aplicaciones
de los sistemas finales.<br> En muchos casos, los servicios de la capa de sesión son parcialmente, o incluso, totalmente prescindibles. <br>
No obstante en algunas aplicaciones su utilización es ineludible.
</li><br>
<li><b>CAPA DE PRESENTACIÓN</b><br>La capa de presentación es el sexto nivel del Modelo OSI,1 y es el que se encarga de la representación de la información,
de manera que aunque distintos equipos puedan tener diferentes representaciones internas de caracteres (ASCII, Unicode, EBCDIC),
números (little-endian tipo Intel, big-endian tipo Motorola), sonido o imágenes, los datos lleguen de manera reconocible.
</li><br>
<li><b>CAPA DE APLICACIÓN</b><br>Es el séptimo nivel de la red según el sistema OSI, ofrece a las aplicaciones la posibilidad de acceder a los servicios de las demás capas,
sincroniza las aplicaciones y establece la disponibilidad de los socios de comunicación deseados.
</li><br>
</ol>
</p>
<h3 id="subt">CoreWarUCM</h3>
<p>
En el tramo final de la segunda evaluación tuvimos que empezar a programar viruses para CoreWarUCM. Para esto utilizamos REDCODE, un lenguaje de programación con el programa A.R.E.S,
para poner en práctica nuestros viruses hicimos un toreno con todos los distintos viruses de los alumnos, torneo que ganó <NAME> y del que yo fuí eliminado en primera ronda.
(se pudo comprobar que programar viruses no es lo mío)<br>
También tuvimos la oportunidad de hacer una salida a la Escuela Politécnica Superior de la Universidad Autónoma de Madrid y así poder recibir indicaciones y consejos de expertos en la materia.
En la salida también tuvimos la oportunidad de visitar una exposición que había con la historia de ordenadores, videojuegos... antiguos que fue muy interesante y recomendable para años siguientes.<br>
<br><img width="400" src="https://upload.wikimedia.org/wikipedia/commons/e/e7/Madrid_-_Campus_de_la_Universidad_Aut%C3%B3noma_de_Madrid_%28UAM%29-Escuela_Polit%C3%A9cnica_Superior_1.JPG"><br>
</p>
<br>
<h2 id="p3">TERCER TRIMESTRE</h2>
<h3 id="subt">HTML</h3>
<p>
Este último trimestre ha sido extraordinario, poco común. No hemos tenido la oportunidad de ir al colegio y todo lo que hemos hecho de TICS ha sido desde casa.
Hemos estado usando khan academy para empezar a programar con C++ (el lenguaje de programación que se usa en el mismo programa con el que estoy haciendo esta web, Notepad++),
y hemos ido adquiriendo técnicas y conceptos para poder hacer páginas web funcionales y originales. Para mi fue fácil empezar porque ya en tercero de la E.S.O tuve TICS y empecé
a adquirir lo básico de este lenguaje de programación. Pero lo aprendido en este tercer trimestre me ha servido para ampliar conceptos y conseguir hacer esta misma página web.<br>
<br> <img width="500" src="https://fotos.subefotos.com/a67b962269c5d7d5b6b21368dcc72598o.png"><br>
<br>
<img width="500" src="https://fotos.subefotos.com/21e077aa440ca788e85ab4f035752807o.png"><br>
</p>
<br>
<h2 id="p3">CONCLUSIÓN</h2>
<p>
En este curso he aprendido diversos conceptos que me han sido de gran ayuda a la hora de hacerme una idea de las TICS, tanto en la teoría como en la práctica. Hemos aprendido historia
y evolución de los aparatos electrónico y sistemas de comunicación entre dispositivos. Me ha sido de gran ayuda y también como una orientación a la hora de ir eligiendo intinerario. Tengo mucho
interés en ver como se desarrollará la asignatura de TICS el curso próximo si todo va bien (que espero que asi sea). <br>
<br>
<NAME> 1ºA de Bachillerato.
</p>
<br>
</body>
</html>
| 254e4fee0a34115d6d8dfcbd5c87767ac3570001 | [
"HTML"
] | 1 | HTML | EnriqueSanzTur/EnriqueSanzTur.github.io | 685d6a60a2d66ec04c5e330528f539d273e77c49 | 19e5b30f5acc22de136883e141eb627180aa57e7 |
refs/heads/main | <file_sep>import json
import sys
import time
import requests as req
from PyP100 import PyP100
d = {}
d["jsonrpc"] = "2.0"
d["method"] = "GetSystemStatus"
d["id"] = "13.4"
d["params"] = 'null'
# Carico il file di configurazione passato da input
if len(sys.argv) != 2:
raise ValueError('Bisogna passare il file di configurazione.')
pathFile = sys.argv[1]
def getConfigurazione():
fileConfig = open(pathFile)
configurazioni = {}
for line in fileConfig.read().splitlines():
configurazioni[line.split(' = ')[0]] = line.split(' = ')[1]
'''if configurazioni[line.split(' = ')[0]] == 'True':
configurazioni[line.split(' = ')[0]] = True
if configurazioni[line.split(' = ')[0]] == 'False':
configurazioni[line.split(' = ')[0]] = False'''
return configurazioni
def getPresaTapo():
configurazione = getConfigurazione()
p100 = PyP100.P100(configurazione["ipPresa"], configurazione["emailAccountTapo"], configurazione["passwordAccountTapo"])
p100.handshake()
p100.login()
return p100, json.loads(p100.getDeviceInfo())["result"]
def accendiPresa(presa):
presa.turnOn()
presa, info = getPresaTapo()
return info["device_on"] == True
def spegniPresa(presa):
presa.turnOff()
presa, info = getPresaTapo()
return info["device_on"] == False
def check():
configurazione = getConfigurazione()
headers = {"Host": "mw40.home",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:88.0) Gecko/20100101 Firefox/88.0",
"Accept": "text/plain, */*; q=0.01",
"Accept-Language": "it-IT,it;q=0.8,en-US;q=0.5,en;q=0.3",
"Accept-Encoding": "gzip, deflate",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"_TclRequestVerificationKey": "<KEY>",
"_TclRequestVerificationToken": "null",
"X-Requested-With": "XMLHttpRequest",
"Content-Length": "70",
"Origin": "http://mw40.home",
"Connection": "keep-alive",
"Referer": "http://mw40.home/index.html",
"Cookie": "",
"Sec-GPC": "1"}
r = req.post(configurazione["url"], data=json.dumps(d), headers=headers)
informazioni = json.loads(r.content)["result"]
batteria = int(informazioni["bat_cap"])
inCarica = informazioni["chg_state"]
presa, informazioniPresa = getPresaTapo()
statoPresa = informazioniPresa["device_on"]
scriviLog("Batteria: " + str(batteria) + ", inCarica: " + str(inCarica) + ", statoPresa: " + str(statoPresa))
if inCarica == 0:
# Sta caricando
time.sleep(60 * int(configurazione["minutiAttesa"]))
check()
elif inCarica == 1:
# Ha finito di caricare
sendIFTTTNotification(configurazione["urlWebhookIFTTT"], "Batteria carica del modem TIM")
if spegniPresa(presa) == False:
sendIFTTTNotification(configurazione["urlWebhookIFTTT"], "Errore nello spegnimento della presa Tapo")
time.sleep(60 * 5)
check()
elif inCarica == 2:
# Non è in carica
if batteria < int(configurazione["minimoBatteria"]):
sendIFTTTNotification(configurazione["urlWebhookIFTTT"], "Caricare modem TIM!!")
if accendiPresa(presa) == False:
sendIFTTTNotification(configurazione["urlWebhookIFTTT"], "Errore nell'accensione della presa Tapo")
time.sleep(60 * 5)
else:
time.sleep(60 * int(configurazione["minutiAttesa"]))
check()
def sendIFTTTNotification(urlWebhook = "", testo = ""):
req.post(urlWebhook, json={'value1': testo})
scriviLog("Notifica: " + testo)
def scriviLog(testo):
pathLog = getConfigurazione()["pathLog"]
fileLog = open(pathLog, "a+")
# Aggiungo il timestamp al log
t = "[" + time.asctime(time.localtime(time.time())) + "] " + str(testo)
fileLog.write(t)
fileLog.write("\n")
fileLog.close()
'''if getConfigurazione()["urlOnlineLogWriter"]:
req.post(getConfigurazione()["urlOnlineLogWriter"], data={'riga': t})'''
def main():
scriviLog("Avvio")
check()
try:
main()
except:
scriviLog("Qualquadra non cosa")
sendIFTTTNotification(getConfigurazione()["urlWebhookIFTTT"], "Qualquadra non cosa.")
main()
finally:
scriviLog("Passo e chiudo")
configurazione = getConfigurazione()
sendIFTTTNotification(configurazione["urlWebhookIFTTT"], "Programma terminato.")
<file_sep># checkModemAlcatelMW40V
A program that polls the modem's home page to retrieve information, notify the user and manage the power supply.
## Hardware used
- Raspberry Pi 3B+;
- Tapo TP-LINK P100 smart plug;
- Alcatel MW40V modem.
## Software used
- Python3;
- the [PyP100](https://github.com/fishbigger/TapoP100) library.
## Installation
To use this script you need Python 3 installed (required by the PyP100 library).
To install PyP100, run
```
pip3 install PyP100
```
## Configuration
The configuration file contains the following parameters (an example file is sketched below the list):
- urlWebhookIFTTT: URL of the webhook used to call the IFTTT service;
- url: URL of the modem's home page;
- minutiAttesa: wait time, in minutes, between one check and the next;
- minimoBatteria: battery level at which charging should start;
- ipPresa: IP address of the Tapo plug;
- emailAccountTapo: email of the Tapo account the device is registered with;
- passwordAccountTapo: password of the Tapo account whose email is given above. WARNING: the password must be at most 8 characters. See [P100 issues](https://github.com/fishbigger/TapoP100/issues);
- pathLog: path of the log file.
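`getConfigurazione()` reads the file as one `key = value` pair per line, split on `' = '`. A minimal sketch of such a file follows; every value here is a made-up placeholder (the webhook key, modem endpoint, IP address and credentials are assumptions and must be replaced with your own):
```
urlWebhookIFTTT = https://maker.ifttt.com/trigger/modem_alert/with/key/YOUR_IFTTT_KEY
url = http://mw40.home/jrd/webapi
minutiAttesa = 10
minimoBatteria = 30
ipPresa = 192.168.1.50
emailAccountTapo = user@example.com
passwordAccountTapo = pass1234
pathLog = /home/pi/checkModem.log
```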
## Usage
Before using the script you need to set the parameters in the configuration file. All parameters are (for now) mandatory. Once every field in the configuration file is filled in, the script can be run with
```
python3 check.py ./config
```
Note that the last argument is the configuration file, and it is mandatory.
| 7fc2fecd5d095c75cd86245ffaa50c101919fcde | [
"Markdown",
"Python"
] | 2 | Markdown | DanieleLupo94/checkModemAlcatelMW40V | 85797c8f2347be386cc579cf7c51eeb6e0f7de25 | a610ab0fa27cea25c0d9badbac810465956bb0e4 |