Dataset schema (one record per source file):

repo_name: string, 5–85 chars
path: string, 3–252 chars
copies: string, 1–5 chars
size: string, 4–6 chars
content: string, 922–999k chars
license: one of 15 classes
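As a minimal sketch of how a dump with this schema might be consumed, assuming it is published as a Hugging Face dataset; the dataset name "example/source-files" and the split name below are hypothetical placeholders, not part of the original dump:

```python
# Hypothetical walkthrough: iterate a few records of a dataset with the
# schema above (repo_name, path, copies, size, content, license).
from datasets import load_dataset

ds = load_dataset("example/source-files", split="train")  # placeholder name

for record in ds.select(range(3)):
    # Provenance and bookkeeping fields are short strings...
    print(record["repo_name"], record["path"],
          record["copies"], record["size"], record["license"])
    # ...while "content" holds the raw file text (up to ~999k chars).
    print(record["content"][:200])
```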
repo_name: javelinanddart/android_kernel_samsung_msm8974
path: fs/minix/dir.c
copies: 6
size: 11456
content:
/* * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * minix directory handling functions * * Updated to filesystem version 3 by Daniel Aragones */ #include "minix.h" #include <linux/buffer_head.h> #include <linux/highmem.h> #include <linux/swap.h> typedef struct minix_dir_entry minix_dirent; typedef struct minix3_dir_entry minix3_dirent; static int minix_readdir(struct file *, void *, filldir_t); const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = minix_readdir, .fsync = generic_file_fsync, }; static inline void dir_put_page(struct page *page) { kunmap(page); page_cache_release(page); } /* * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. */ static unsigned minix_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = PAGE_CACHE_SIZE; if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT)) last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1); return last_byte; } static inline unsigned long dir_pages(struct inode *inode) { return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; } static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) { struct address_space *mapping = page->mapping; struct inode *dir = mapping->host; int err = 0; block_write_end(NULL, mapping, pos, len, len, page, NULL); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); } if (IS_DIRSYNC(dir)) err = write_one_page(page, 1); else unlock_page(page); return err; } static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) kmap(page); return page; } static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) { return (void*)((char*)de + sbi->s_dirsize); } static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir) { unsigned long pos = filp->f_pos; struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; unsigned offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(sb); unsigned chunk_size = sbi->s_dirsize; char *name; __u32 inumber; pos = (pos + chunk_size-1) & ~(chunk_size-1); if (pos >= inode->i_size) goto done; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; struct page *page = dir_get_page(inode, n); if (IS_ERR(page)) continue; kaddr = (char *)page_address(page); p = kaddr+offset; limit = kaddr + minix_last_byte(inode, n) - chunk_size; for ( ; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber) { int over; unsigned l = strnlen(name, sbi->s_namelen); offset = p - kaddr; over = filldir(dirent, name, l, (n << PAGE_CACHE_SHIFT) | offset, inumber, DT_UNKNOWN); if (over) { dir_put_page(page); goto done; } } } dir_put_page(page); } done: filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset; return 0; } static inline int namecompare(int len, int maxlen, const char * name, const char * buffer) { if (len < maxlen && buffer[len]) return 0; return !memcmp(name, buffer, len); } /* * minix_find_entry() * * finds an entry in the specified directory with the wanted name. 
It * returns the cache buffer in which the entry was found, and the entry * itself (as a parameter - res_dir). It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. */ minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page) { const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct inode * dir = dentry->d_parent->d_inode; struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); unsigned long n; unsigned long npages = dir_pages(dir); struct page *page = NULL; char *p; char *namx; __u32 inumber; *res_page = NULL; for (n = 0; n < npages; n++) { char *kaddr, *limit; page = dir_get_page(dir, n); if (IS_ERR(page)) continue; kaddr = (char*)page_address(page); limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; namx = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; namx = de->name; inumber = de->inode; } if (!inumber) continue; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto found; } dir_put_page(page); } return NULL; found: *res_page = page; return (minix_dirent *)p; } int minix_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); struct page *page = NULL; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr, *p; minix_dirent *de; minix3_dirent *de3; loff_t pos; int err; char *namx = NULL; __u32 inumber; /* * We take care of directory expansion in the same loop * This code plays outside i_size, so it locks the page * to protect that region. 
*/ for (n = 0; n <= npages; n++) { char *limit, *dir_end; page = dir_get_page(dir, n); err = PTR_ERR(page); if (IS_ERR(page)) goto out; lock_page(page); kaddr = (char*)page_address(page); dir_end = kaddr + minix_last_byte(dir, n); limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { de = (minix_dirent *)p; de3 = (minix3_dirent *)p; if (sbi->s_version == MINIX_V3) { namx = de3->name; inumber = de3->inode; } else { namx = de->name; inumber = de->inode; } if (p == dir_end) { /* We hit i_size */ if (sbi->s_version == MINIX_V3) de3->inode = 0; else de->inode = 0; goto got_it; } if (!inumber) goto got_it; err = -EEXIST; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto out_unlock; } unlock_page(page); dir_put_page(page); } BUG(); return -EINVAL; got_it: pos = page_offset(page) + p - (char *)page_address(page); err = minix_prepare_chunk(page, pos, sbi->s_dirsize); if (err) goto out_unlock; memcpy (namx, name, namelen); if (sbi->s_version == MINIX_V3) { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4); de3->inode = inode->i_ino; } else { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2); de->inode = inode->i_ino; } err = dir_commit_chunk(page, pos, sbi->s_dirsize); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dir); out_put: dir_put_page(page); out: return err; out_unlock: unlock_page(page); goto out_put; } int minix_delete_entry(struct minix_dir_entry *de, struct page *page) { struct inode *inode = page->mapping->host; char *kaddr = page_address(page); loff_t pos = page_offset(page) + (char*)de - kaddr; struct minix_sb_info *sbi = minix_sb(inode->i_sb); unsigned len = sbi->s_dirsize; int err; lock_page(page); err = minix_prepare_chunk(page, pos, len); if (err == 0) { if (sbi->s_version == MINIX_V3) ((minix3_dirent *) de)->inode = 0; else de->inode = 0; err = dir_commit_chunk(page, pos, len); } else { unlock_page(page); } dir_put_page(page); inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; mark_inode_dirty(inode); return err; } int minix_make_empty(struct inode *inode, struct inode *dir) { struct page *page = grab_cache_page(inode->i_mapping, 0); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *kaddr; int err; if (!page) return -ENOMEM; err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize); if (err) { unlock_page(page); goto fail; } kaddr = kmap_atomic(page); memset(kaddr, 0, PAGE_CACHE_SIZE); if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)kaddr; de3->inode = inode->i_ino; strcpy(de3->name, "."); de3 = minix_next_entry(de3, sbi); de3->inode = dir->i_ino; strcpy(de3->name, ".."); } else { minix_dirent *de = (minix_dirent *)kaddr; de->inode = inode->i_ino; strcpy(de->name, "."); de = minix_next_entry(de, sbi); de->inode = dir->i_ino; strcpy(de->name, ".."); } kunmap_atomic(kaddr); err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); fail: page_cache_release(page); return err; } /* * routine to check that the specified directory is empty (for rmdir) */ int minix_empty_dir(struct inode * inode) { struct page *page = NULL; unsigned long i, npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *name; __u32 inumber; for (i = 0; i < npages; i++) { char *p, *kaddr, *limit; page = dir_get_page(inode, i); if (IS_ERR(page)) continue; kaddr = (char *)page_address(page); limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = 
(minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber != 0) { /* check for . and .. */ if (name[0] != '.') goto not_empty; if (!name[1]) { if (inumber != inode->i_ino) goto not_empty; } else if (name[1] != '.') goto not_empty; else if (name[2]) goto not_empty; } } dir_put_page(page); } return 1; not_empty: dir_put_page(page); return 0; } /* Releases the page */ void minix_set_link(struct minix_dir_entry *de, struct page *page, struct inode *inode) { struct inode *dir = page->mapping->host; struct minix_sb_info *sbi = minix_sb(dir->i_sb); loff_t pos = page_offset(page) + (char *)de-(char*)page_address(page); int err; lock_page(page); err = minix_prepare_chunk(page, pos, sbi->s_dirsize); if (err == 0) { if (sbi->s_version == MINIX_V3) ((minix3_dirent *) de)->inode = inode->i_ino; else de->inode = inode->i_ino; err = dir_commit_chunk(page, pos, sbi->s_dirsize); } else { unlock_page(page); } dir_put_page(page); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dir); } struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p) { struct page *page = dir_get_page(dir, 0); struct minix_sb_info *sbi = minix_sb(dir->i_sb); struct minix_dir_entry *de = NULL; if (!IS_ERR(page)) { de = minix_next_entry(page_address(page), sbi); *p = page; } return de; } ino_t minix_inode_by_name(struct dentry *dentry) { struct page *page; struct minix_dir_entry *de = minix_find_entry(dentry, &page); ino_t res = 0; if (de) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct minix_sb_info *sbi = minix_sb(inode->i_sb); if (sbi->s_version == MINIX_V3) res = ((minix3_dirent *) de)->inode; else res = de->inode; dir_put_page(page); } return res; }
license: gpl-2.0
repo_name: ssahani/systemd
path: src/test/test-mountpoint-util.c
copies: 6
size: 13890
content:
/* SPDX-License-Identifier: LGPL-2.1-or-later */ #include <sched.h> #include <sys/mount.h> #include <unistd.h> #include "alloc-util.h" #include "constants.h" #include "fd-util.h" #include "fileio.h" #include "hashmap.h" #include "log.h" #include "mountpoint-util.h" #include "path-util.h" #include "rm-rf.h" #include "string-util.h" #include "tests.h" #include "tmpfile-util.h" static void test_mount_propagation_flags_one(const char *name, int ret, unsigned long expected) { unsigned long flags; log_info("/* %s(%s) */", __func__, strnull(name)); assert_se(mount_propagation_flags_from_string(name, &flags) == ret); if (ret >= 0) { const char *c; assert_se(flags == expected); c = mount_propagation_flags_to_string(flags); if (isempty(name)) assert_se(isempty(c)); else assert_se(streq(c, name)); } } TEST(mount_propagation_flags) { test_mount_propagation_flags_one("shared", 0, MS_SHARED); test_mount_propagation_flags_one("slave", 0, MS_SLAVE); test_mount_propagation_flags_one("private", 0, MS_PRIVATE); test_mount_propagation_flags_one(NULL, 0, 0); test_mount_propagation_flags_one("", 0, 0); test_mount_propagation_flags_one("xxxx", -EINVAL, 0); test_mount_propagation_flags_one(" ", -EINVAL, 0); } TEST(mnt_id) { _cleanup_fclose_ FILE *f = NULL; _cleanup_hashmap_free_free_ Hashmap *h = NULL; char *p; void *k; int r; assert_se(f = fopen("/proc/self/mountinfo", "re")); assert_se(h = hashmap_new(&trivial_hash_ops)); for (;;) { _cleanup_free_ char *line = NULL, *path = NULL; int mnt_id; r = read_line(f, LONG_LINE_MAX, &line); if (r == 0) break; assert_se(r > 0); assert_se(sscanf(line, "%i %*s %*s %*s %ms", &mnt_id, &path) == 2); #if HAS_FEATURE_MEMORY_SANITIZER /* We don't know the length of the string, so we need to unpoison it one char at a time */ for (const char *c = path; ;c++) { msan_unpoison(c, 1); if (!*c) break; } #endif log_debug("mountinfo: %s → %i", path, mnt_id); assert_se(hashmap_put(h, INT_TO_PTR(mnt_id), path) >= 0); path = NULL; } HASHMAP_FOREACH_KEY(p, k, h) { int mnt_id = PTR_TO_INT(k), mnt_id2; const char *q; r = path_get_mnt_id(p, &mnt_id2); if (r < 0) { log_debug_errno(r, "Failed to get the mnt id of %s: %m", p); continue; } if (mnt_id == mnt_id2) { log_debug("mnt ids of %s is %i.", p, mnt_id); continue; } else log_debug("mnt ids of %s are %i (from /proc/self/mountinfo), %i (from path_get_mnt_id()).", p, mnt_id, mnt_id2); /* The ids don't match? This can easily happen e.g. running with "unshare --mount-proc". * See #11505. */ assert_se(q = hashmap_get(h, INT_TO_PTR(mnt_id2))); assert_se((r = path_is_mount_point(p, NULL, 0)) >= 0); if (r == 0) { /* If the path is not a mount point anymore, then it must be a sub directory of * the path corresponds to mnt_id2. 
*/ log_debug("The path %s for mnt id %i is not a mount point.", p, mnt_id2); assert_se(!isempty(path_startswith(p, q))); } else { /* If the path is still a mount point, then it must be equivalent to the path * corresponds to mnt_id2 */ log_debug("There are multiple mounts on the same path %s.", p); assert_se(path_equal(p, q)); } } } TEST(path_is_mount_point) { int fd; char tmp_dir[] = "/tmp/test-path-is-mount-point-XXXXXX"; _cleanup_free_ char *file1 = NULL, *file2 = NULL, *link1 = NULL, *link2 = NULL; _cleanup_free_ char *dir1 = NULL, *dir1file = NULL, *dirlink1 = NULL, *dirlink1file = NULL; _cleanup_free_ char *dir2 = NULL, *dir2file = NULL; assert_se(path_is_mount_point("/", NULL, AT_SYMLINK_FOLLOW) > 0); assert_se(path_is_mount_point("/", NULL, 0) > 0); assert_se(path_is_mount_point("//", NULL, AT_SYMLINK_FOLLOW) > 0); assert_se(path_is_mount_point("//", NULL, 0) > 0); assert_se(path_is_mount_point("/proc", NULL, AT_SYMLINK_FOLLOW) > 0); assert_se(path_is_mount_point("/proc", NULL, 0) > 0); assert_se(path_is_mount_point("/proc/", NULL, AT_SYMLINK_FOLLOW) > 0); assert_se(path_is_mount_point("/proc/", NULL, 0) > 0); assert_se(path_is_mount_point("/proc/1", NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point("/proc/1", NULL, 0) == 0); assert_se(path_is_mount_point("/proc/1/", NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point("/proc/1/", NULL, 0) == 0); assert_se(path_is_mount_point("/sys", NULL, AT_SYMLINK_FOLLOW) > 0); assert_se(path_is_mount_point("/sys", NULL, 0) > 0); assert_se(path_is_mount_point("/sys/", NULL, AT_SYMLINK_FOLLOW) > 0); assert_se(path_is_mount_point("/sys/", NULL, 0) > 0); /* we'll create a hierarchy of different kinds of dir/file/link * layouts: * * <tmp>/file1, <tmp>/file2 * <tmp>/link1 -> file1, <tmp>/link2 -> file2 * <tmp>/dir1/ * <tmp>/dir1/file * <tmp>/dirlink1 -> dir1 * <tmp>/dirlink1file -> dirlink1/file * <tmp>/dir2/ * <tmp>/dir2/file */ /* file mountpoints */ assert_se(mkdtemp(tmp_dir) != NULL); file1 = path_join(tmp_dir, "file1"); assert_se(file1); file2 = path_join(tmp_dir, "file2"); assert_se(file2); fd = open(file1, O_WRONLY|O_CREAT|O_EXCL|O_CLOEXEC, 0664); assert_se(fd > 0); close(fd); fd = open(file2, O_WRONLY|O_CREAT|O_EXCL|O_CLOEXEC, 0664); assert_se(fd > 0); close(fd); link1 = path_join(tmp_dir, "link1"); assert_se(link1); assert_se(symlink("file1", link1) == 0); link2 = path_join(tmp_dir, "link2"); assert_se(link1); assert_se(symlink("file2", link2) == 0); assert_se(path_is_mount_point(file1, NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point(file1, NULL, 0) == 0); assert_se(path_is_mount_point(link1, NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point(link1, NULL, 0) == 0); /* directory mountpoints */ dir1 = path_join(tmp_dir, "dir1"); assert_se(dir1); assert_se(mkdir(dir1, 0755) == 0); dirlink1 = path_join(tmp_dir, "dirlink1"); assert_se(dirlink1); assert_se(symlink("dir1", dirlink1) == 0); dirlink1file = path_join(tmp_dir, "dirlink1file"); assert_se(dirlink1file); assert_se(symlink("dirlink1/file", dirlink1file) == 0); dir2 = path_join(tmp_dir, "dir2"); assert_se(dir2); assert_se(mkdir(dir2, 0755) == 0); assert_se(path_is_mount_point(dir1, NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point(dir1, NULL, 0) == 0); assert_se(path_is_mount_point(dirlink1, NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point(dirlink1, NULL, 0) == 0); /* file in subdirectory mountpoints */ dir1file = path_join(dir1, "file"); assert_se(dir1file); fd = open(dir1file, O_WRONLY|O_CREAT|O_EXCL|O_CLOEXEC, 0664); 
assert_se(fd > 0); close(fd); assert_se(path_is_mount_point(dir1file, NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point(dir1file, NULL, 0) == 0); assert_se(path_is_mount_point(dirlink1file, NULL, AT_SYMLINK_FOLLOW) == 0); assert_se(path_is_mount_point(dirlink1file, NULL, 0) == 0); /* these tests will only work as root */ if (mount(file1, file2, NULL, MS_BIND, NULL) >= 0) { int rf, rt, rdf, rdt, rlf, rlt, rl1f, rl1t; const char *file2d; /* files */ /* capture results in vars, to avoid dangling mounts on failure */ log_info("%s: %s", __func__, file2); rf = path_is_mount_point(file2, NULL, 0); rt = path_is_mount_point(file2, NULL, AT_SYMLINK_FOLLOW); file2d = strjoina(file2, "/"); log_info("%s: %s", __func__, file2d); rdf = path_is_mount_point(file2d, NULL, 0); rdt = path_is_mount_point(file2d, NULL, AT_SYMLINK_FOLLOW); log_info("%s: %s", __func__, link2); rlf = path_is_mount_point(link2, NULL, 0); rlt = path_is_mount_point(link2, NULL, AT_SYMLINK_FOLLOW); assert_se(umount(file2) == 0); assert_se(rf == 1); assert_se(rt == 1); assert_se(rdf == -ENOTDIR); assert_se(rdt == -ENOTDIR); assert_se(rlf == 0); assert_se(rlt == 1); /* dirs */ dir2file = path_join(dir2, "file"); assert_se(dir2file); fd = open(dir2file, O_WRONLY|O_CREAT|O_EXCL|O_CLOEXEC, 0664); assert_se(fd > 0); close(fd); assert_se(mount(dir2, dir1, NULL, MS_BIND, NULL) >= 0); log_info("%s: %s", __func__, dir1); rf = path_is_mount_point(dir1, NULL, 0); rt = path_is_mount_point(dir1, NULL, AT_SYMLINK_FOLLOW); log_info("%s: %s", __func__, dirlink1); rlf = path_is_mount_point(dirlink1, NULL, 0); rlt = path_is_mount_point(dirlink1, NULL, AT_SYMLINK_FOLLOW); log_info("%s: %s", __func__, dirlink1file); /* its parent is a mount point, but not /file itself */ rl1f = path_is_mount_point(dirlink1file, NULL, 0); rl1t = path_is_mount_point(dirlink1file, NULL, AT_SYMLINK_FOLLOW); assert_se(umount(dir1) == 0); assert_se(rf == 1); assert_se(rt == 1); assert_se(rlf == 0); assert_se(rlt == 1); assert_se(rl1f == 0); assert_se(rl1t == 0); } else log_info("Skipping bind mount file test"); assert_se(rm_rf(tmp_dir, REMOVE_ROOT|REMOVE_PHYSICAL) == 0); } TEST(fd_is_mount_point) { _cleanup_close_ int fd = -1; int r; fd = open("/", O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY); assert_se(fd >= 0); /* Not allowed, since "/" is a path, not a plain filename */ assert_se(fd_is_mount_point(fd, "/", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, ".", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "./", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "..", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "../", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "/proc", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "/proc/", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "proc/sys", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "proc/sys/", 0) == -EINVAL); /* This one definitely is a mount point */ assert_se(fd_is_mount_point(fd, "proc", 0) > 0); assert_se(fd_is_mount_point(fd, "proc/", 0) > 0); /* /root's entire reason for being is to be on the root file system (i.e. not in /home/ which * might be split off), so that the user can always log in, so it cannot be a mount point unless * the system is borked. Let's allow for it to be missing though. 
*/ assert_se(IN_SET(fd_is_mount_point(fd, "root", 0), -ENOENT, 0)); assert_se(IN_SET(fd_is_mount_point(fd, "root/", 0), -ENOENT, 0)); safe_close(fd); fd = open("/proc", O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY); assert_se(fd >= 0); assert_se(fd_is_mount_point(fd, NULL, 0) > 0); assert_se(fd_is_mount_point(fd, "", 0) == -EINVAL); assert_se(fd_is_mount_point(fd, "version", 0) == 0); safe_close(fd); fd = open("/proc/version", O_RDONLY|O_CLOEXEC|O_NOCTTY); assert_se(fd >= 0); r = fd_is_mount_point(fd, NULL, 0); assert_se(IN_SET(r, 0, -ENOTDIR)); /* on old kernels we can't determine if regular files are mount points if we have no directory fd */ assert_se(fd_is_mount_point(fd, "", 0) == -EINVAL); } static int intro(void) { /* let's move into our own mount namespace with all propagation from the host turned off, so * that /proc/self/mountinfo is static and constant for the whole time our test runs. */ if (unshare(CLONE_NEWNS) < 0) { if (!ERRNO_IS_PRIVILEGE(errno)) return log_error_errno(errno, "Failed to detach mount namespace: %m"); log_notice("Lacking privilege to create separate mount namespace, proceeding in originating mount namespace."); } else assert_se(mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL) >= 0); return EXIT_SUCCESS; } DEFINE_TEST_MAIN_WITH_INTRO(LOG_DEBUG, intro);
license: gpl-2.0
repo_name: dh-electronics/linux-am35x
path: arch/arm/mach-versatile/core.c
copies: 518
size: 21754
content:
/* * linux/arch/arm/mach-versatile/core.c * * Copyright (C) 1999 - 2003 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/sysdev.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/amba/bus.h> #include <linux/amba/clcd.h> #include <linux/amba/pl061.h> #include <linux/amba/mmci.h> #include <linux/amba/pl022.h> #include <linux/io.h> #include <linux/gfp.h> #include <linux/clkdev.h> #include <linux/mtd/physmap.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/leds.h> #include <asm/hardware/arm_timer.h> #include <asm/hardware/icst.h> #include <asm/hardware/vic.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/platform.h> #include <asm/hardware/timer-sp.h> #include <plat/clcd.h> #include <plat/fpga-irq.h> #include <plat/sched_clock.h> #include "core.h" /* * All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx * is the (PA >> 12). * * Setup a VA for the Versatile Vectored Interrupt Controller. */ #define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) #define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) static struct fpga_irq_data sic_irq = { .base = VA_SIC_BASE, .irq_start = IRQ_SIC_START, .chip.name = "SIC", }; #if 1 #define IRQ_MMCI0A IRQ_VICSOURCE22 #define IRQ_AACI IRQ_VICSOURCE24 #define IRQ_ETH IRQ_VICSOURCE25 #define PIC_MASK 0xFFD00000 #else #define IRQ_MMCI0A IRQ_SIC_MMCI0A #define IRQ_AACI IRQ_SIC_AACI #define IRQ_ETH IRQ_SIC_ETH #define PIC_MASK 0 #endif /* Lookup table for finding a DT node that represents the vic instance */ static const struct of_device_id vic_of_match[] __initconst = { { .compatible = "arm,versatile-vic", }, {} }; static const struct of_device_id sic_of_match[] __initconst = { { .compatible = "arm,versatile-sic", }, {} }; void __init versatile_init_irq(void) { vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0); irq_domain_generate_simple(vic_of_match, VERSATILE_VIC_BASE, IRQ_VIC_START); writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR); fpga_irq_init(IRQ_VICSOURCE31, ~PIC_MASK, &sic_irq); irq_domain_generate_simple(sic_of_match, VERSATILE_SIC_BASE, IRQ_SIC_START); /* * Interrupts on secondary controller from 0 to 8 are routed to * source 31 on PIC. * Interrupts from 21 to 31 are routed directly to the VIC on * the corresponding number on primary controller. This is controlled * by setting PIC_ENABLEx. 
*/ writel(PIC_MASK, VA_SIC_BASE + SIC_INT_PIC_ENABLE); } static struct map_desc versatile_io_desc[] __initdata = { { .virtual = IO_ADDRESS(VERSATILE_SYS_BASE), .pfn = __phys_to_pfn(VERSATILE_SYS_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(VERSATILE_SIC_BASE), .pfn = __phys_to_pfn(VERSATILE_SIC_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(VERSATILE_VIC_BASE), .pfn = __phys_to_pfn(VERSATILE_VIC_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(VERSATILE_SCTL_BASE), .pfn = __phys_to_pfn(VERSATILE_SCTL_BASE), .length = SZ_4K * 9, .type = MT_DEVICE }, #ifdef CONFIG_MACH_VERSATILE_AB { .virtual = IO_ADDRESS(VERSATILE_GPIO0_BASE), .pfn = __phys_to_pfn(VERSATILE_GPIO0_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = IO_ADDRESS(VERSATILE_IB2_BASE), .pfn = __phys_to_pfn(VERSATILE_IB2_BASE), .length = SZ_64M, .type = MT_DEVICE }, #endif #ifdef CONFIG_DEBUG_LL { .virtual = IO_ADDRESS(VERSATILE_UART0_BASE), .pfn = __phys_to_pfn(VERSATILE_UART0_BASE), .length = SZ_4K, .type = MT_DEVICE }, #endif #ifdef CONFIG_PCI { .virtual = IO_ADDRESS(VERSATILE_PCI_CORE_BASE), .pfn = __phys_to_pfn(VERSATILE_PCI_CORE_BASE), .length = SZ_4K, .type = MT_DEVICE }, { .virtual = (unsigned long)VERSATILE_PCI_VIRT_BASE, .pfn = __phys_to_pfn(VERSATILE_PCI_BASE), .length = VERSATILE_PCI_BASE_SIZE, .type = MT_DEVICE }, { .virtual = (unsigned long)VERSATILE_PCI_CFG_VIRT_BASE, .pfn = __phys_to_pfn(VERSATILE_PCI_CFG_BASE), .length = VERSATILE_PCI_CFG_BASE_SIZE, .type = MT_DEVICE }, #if 0 { .virtual = VERSATILE_PCI_VIRT_MEM_BASE0, .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0), .length = SZ_16M, .type = MT_DEVICE }, { .virtual = VERSATILE_PCI_VIRT_MEM_BASE1, .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1), .length = SZ_16M, .type = MT_DEVICE }, { .virtual = VERSATILE_PCI_VIRT_MEM_BASE2, .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2), .length = SZ_16M, .type = MT_DEVICE }, #endif #endif }; void __init versatile_map_io(void) { iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc)); } #define VERSATILE_FLASHCTRL (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET) static void versatile_flash_set_vpp(struct platform_device *pdev, int on) { u32 val; val = __raw_readl(VERSATILE_FLASHCTRL); if (on) val |= VERSATILE_FLASHPROG_FLVPPEN; else val &= ~VERSATILE_FLASHPROG_FLVPPEN; __raw_writel(val, VERSATILE_FLASHCTRL); } static struct physmap_flash_data versatile_flash_data = { .width = 4, .set_vpp = versatile_flash_set_vpp, }; static struct resource versatile_flash_resource = { .start = VERSATILE_FLASH_BASE, .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1, .flags = IORESOURCE_MEM, }; static struct platform_device versatile_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &versatile_flash_data, }, .num_resources = 1, .resource = &versatile_flash_resource, }; static struct resource smc91x_resources[] = { [0] = { .start = VERSATILE_ETH_BASE, .end = VERSATILE_ETH_BASE + SZ_64K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_ETH, .end = IRQ_ETH, .flags = IORESOURCE_IRQ, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; static struct resource versatile_i2c_resource = { .start = VERSATILE_I2C_BASE, .end = VERSATILE_I2C_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }; static struct platform_device versatile_i2c_device = { .name = "versatile-i2c", .id = 0, .num_resources = 1, .resource = &versatile_i2c_resource, }; 
static struct i2c_board_info versatile_i2c_board_info[] = { { I2C_BOARD_INFO("ds1338", 0xd0 >> 1), }, }; static int __init versatile_i2c_init(void) { return i2c_register_board_info(0, versatile_i2c_board_info, ARRAY_SIZE(versatile_i2c_board_info)); } arch_initcall(versatile_i2c_init); #define VERSATILE_SYSMCI (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET) unsigned int mmc_status(struct device *dev) { struct amba_device *adev = container_of(dev, struct amba_device, dev); u32 mask; if (adev->res.start == VERSATILE_MMCI0_BASE) mask = 1; else mask = 2; return readl(VERSATILE_SYSMCI) & mask; } static struct mmci_platform_data mmc0_plat_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .status = mmc_status, .gpio_wp = -1, .gpio_cd = -1, }; static struct resource char_lcd_resources[] = { { .start = VERSATILE_CHAR_LCD_BASE, .end = (VERSATILE_CHAR_LCD_BASE + SZ_4K - 1), .flags = IORESOURCE_MEM, }, }; static struct platform_device char_lcd_device = { .name = "arm-charlcd", .id = -1, .num_resources = ARRAY_SIZE(char_lcd_resources), .resource = char_lcd_resources, }; /* * Clock handling */ static const struct icst_params versatile_oscvco_params = { .ref = 24000000, .vco_max = ICST307_VCO_MAX, .vco_min = ICST307_VCO_MIN, .vd_min = 4 + 8, .vd_max = 511 + 8, .rd_min = 1 + 2, .rd_max = 127 + 2, .s2div = icst307_s2div, .idx2s = icst307_idx2s, }; static void versatile_oscvco_set(struct clk *clk, struct icst_vco vco) { void __iomem *sys_lock = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET; u32 val; val = readl(clk->vcoreg) & ~0x7ffff; val |= vco.v | (vco.r << 9) | (vco.s << 16); writel(0xa05f, sys_lock); writel(val, clk->vcoreg); writel(0, sys_lock); } static const struct clk_ops osc4_clk_ops = { .round = icst_clk_round, .set = icst_clk_set, .setvco = versatile_oscvco_set, }; static struct clk osc4_clk = { .ops = &osc4_clk_ops, .params = &versatile_oscvco_params, }; /* * These are fixed clocks. */ static struct clk ref24_clk = { .rate = 24000000, }; static struct clk sp804_clk = { .rate = 1000000, }; static struct clk dummy_apb_pclk; static struct clk_lookup lookups[] = { { /* AMBA bus clock */ .con_id = "apb_pclk", .clk = &dummy_apb_pclk, }, { /* UART0 */ .dev_id = "dev:f1", .clk = &ref24_clk, }, { /* UART1 */ .dev_id = "dev:f2", .clk = &ref24_clk, }, { /* UART2 */ .dev_id = "dev:f3", .clk = &ref24_clk, }, { /* UART3 */ .dev_id = "fpga:09", .clk = &ref24_clk, }, { /* KMI0 */ .dev_id = "fpga:06", .clk = &ref24_clk, }, { /* KMI1 */ .dev_id = "fpga:07", .clk = &ref24_clk, }, { /* MMC0 */ .dev_id = "fpga:05", .clk = &ref24_clk, }, { /* MMC1 */ .dev_id = "fpga:0b", .clk = &ref24_clk, }, { /* SSP */ .dev_id = "dev:f4", .clk = &ref24_clk, }, { /* CLCD */ .dev_id = "dev:20", .clk = &osc4_clk, }, { /* SP804 timers */ .dev_id = "sp804", .clk = &sp804_clk, }, }; /* * CLCD support. */ #define SYS_CLCD_MODE_MASK (3 << 0) #define SYS_CLCD_MODE_888 (0 << 0) #define SYS_CLCD_MODE_5551 (1 << 0) #define SYS_CLCD_MODE_565_RLSB (2 << 0) #define SYS_CLCD_MODE_565_BLSB (3 << 0) #define SYS_CLCD_NLCDIOON (1 << 2) #define SYS_CLCD_VDDPOSSWITCH (1 << 3) #define SYS_CLCD_PWR3V5SWITCH (1 << 4) #define SYS_CLCD_ID_MASK (0x1f << 8) #define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8) #define SYS_CLCD_ID_UNKNOWN_8_4 (0x01 << 8) #define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8) #define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8) #define SYS_CLCD_ID_VGA (0x1f << 8) static bool is_sanyo_2_5_lcd; /* * Disable all display connectors on the interface module. 
*/ static void versatile_clcd_disable(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; u32 val; val = readl(sys_clcd); val &= ~SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; writel(val, sys_clcd); #ifdef CONFIG_MACH_VERSATILE_AB /* * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light off */ if (machine_is_versatile_ab() && is_sanyo_2_5_lcd) { void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL); unsigned long ctrl; ctrl = readl(versatile_ib2_ctrl); ctrl &= ~0x01; writel(ctrl, versatile_ib2_ctrl); } #endif } /* * Enable the relevant connector on the interface module. */ static void versatile_clcd_enable(struct clcd_fb *fb) { struct fb_var_screeninfo *var = &fb->fb.var; void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; u32 val; val = readl(sys_clcd); val &= ~SYS_CLCD_MODE_MASK; switch (var->green.length) { case 5: val |= SYS_CLCD_MODE_5551; break; case 6: if (var->red.offset == 0) val |= SYS_CLCD_MODE_565_RLSB; else val |= SYS_CLCD_MODE_565_BLSB; break; case 8: val |= SYS_CLCD_MODE_888; break; } /* * Set the MUX */ writel(val, sys_clcd); /* * And now enable the PSUs */ val |= SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH; writel(val, sys_clcd); #ifdef CONFIG_MACH_VERSATILE_AB /* * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light on */ if (machine_is_versatile_ab() && is_sanyo_2_5_lcd) { void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL); unsigned long ctrl; ctrl = readl(versatile_ib2_ctrl); ctrl |= 0x01; writel(ctrl, versatile_ib2_ctrl); } #endif } /* * Detect which LCD panel is connected, and return the appropriate * clcd_panel structure. Note: we do not have any information on * the required timings for the 8.4in panel, so we presently assume * VGA timings. 
*/ static int versatile_clcd_setup(struct clcd_fb *fb) { void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; const char *panel_name; u32 val; is_sanyo_2_5_lcd = false; val = readl(sys_clcd) & SYS_CLCD_ID_MASK; if (val == SYS_CLCD_ID_SANYO_3_8) panel_name = "Sanyo TM38QV67A02A"; else if (val == SYS_CLCD_ID_SANYO_2_5) { panel_name = "Sanyo QVGA Portrait"; is_sanyo_2_5_lcd = true; } else if (val == SYS_CLCD_ID_EPSON_2_2) panel_name = "Epson L2F50113T00"; else if (val == SYS_CLCD_ID_VGA) panel_name = "VGA"; else { printk(KERN_ERR "CLCD: unknown LCD panel ID 0x%08x, using VGA\n", val); panel_name = "VGA"; } fb->panel = versatile_clcd_get_panel(panel_name); if (!fb->panel) return -EINVAL; return versatile_clcd_setup_dma(fb, SZ_1M); } static void versatile_clcd_decode(struct clcd_fb *fb, struct clcd_regs *regs) { clcdfb_decode(fb, regs); /* Always clear BGR for RGB565: we do the routing externally */ if (fb->fb.var.green.length == 6) regs->cntl &= ~CNTL_BGR; } static struct clcd_board clcd_plat_data = { .name = "Versatile", .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888, .check = clcdfb_check, .decode = versatile_clcd_decode, .disable = versatile_clcd_disable, .enable = versatile_clcd_enable, .setup = versatile_clcd_setup, .mmap = versatile_clcd_mmap_dma, .remove = versatile_clcd_remove_dma, }; static struct pl061_platform_data gpio0_plat_data = { .gpio_base = 0, .irq_base = IRQ_GPIO0_START, }; static struct pl061_platform_data gpio1_plat_data = { .gpio_base = 8, .irq_base = IRQ_GPIO1_START, }; static struct pl022_ssp_controller ssp0_plat_data = { .bus_id = 0, .enable_dma = 0, .num_chipselect = 1, }; #define AACI_IRQ { IRQ_AACI, NO_IRQ } #define MMCI0_IRQ { IRQ_MMCI0A,IRQ_SIC_MMCI0B } #define KMI0_IRQ { IRQ_SIC_KMI0, NO_IRQ } #define KMI1_IRQ { IRQ_SIC_KMI1, NO_IRQ } /* * These devices are connected directly to the multi-layer AHB switch */ #define SMC_IRQ { NO_IRQ, NO_IRQ } #define MPMC_IRQ { NO_IRQ, NO_IRQ } #define CLCD_IRQ { IRQ_CLCDINT, NO_IRQ } #define DMAC_IRQ { IRQ_DMAINT, NO_IRQ } /* * These devices are connected via the core APB bridge */ #define SCTL_IRQ { NO_IRQ, NO_IRQ } #define WATCHDOG_IRQ { IRQ_WDOGINT, NO_IRQ } #define GPIO0_IRQ { IRQ_GPIOINT0, NO_IRQ } #define GPIO1_IRQ { IRQ_GPIOINT1, NO_IRQ } #define RTC_IRQ { IRQ_RTCINT, NO_IRQ } /* * These devices are connected via the DMA APB bridge */ #define SCI_IRQ { IRQ_SCIINT, NO_IRQ } #define UART0_IRQ { IRQ_UARTINT0, NO_IRQ } #define UART1_IRQ { IRQ_UARTINT1, NO_IRQ } #define UART2_IRQ { IRQ_UARTINT2, NO_IRQ } #define SSP_IRQ { IRQ_SSPINT, NO_IRQ } /* FPGA Primecells */ AMBA_DEVICE(aaci, "fpga:04", AACI, NULL); AMBA_DEVICE(mmc0, "fpga:05", MMCI0, &mmc0_plat_data); AMBA_DEVICE(kmi0, "fpga:06", KMI0, NULL); AMBA_DEVICE(kmi1, "fpga:07", KMI1, NULL); /* DevChip Primecells */ AMBA_DEVICE(smc, "dev:00", SMC, NULL); AMBA_DEVICE(mpmc, "dev:10", MPMC, NULL); AMBA_DEVICE(clcd, "dev:20", CLCD, &clcd_plat_data); AMBA_DEVICE(dmac, "dev:30", DMAC, NULL); AMBA_DEVICE(sctl, "dev:e0", SCTL, NULL); AMBA_DEVICE(wdog, "dev:e1", WATCHDOG, NULL); AMBA_DEVICE(gpio0, "dev:e4", GPIO0, &gpio0_plat_data); AMBA_DEVICE(gpio1, "dev:e5", GPIO1, &gpio1_plat_data); AMBA_DEVICE(rtc, "dev:e8", RTC, NULL); AMBA_DEVICE(sci0, "dev:f0", SCI, NULL); AMBA_DEVICE(uart0, "dev:f1", UART0, NULL); AMBA_DEVICE(uart1, "dev:f2", UART1, NULL); AMBA_DEVICE(uart2, "dev:f3", UART2, NULL); AMBA_DEVICE(ssp0, "dev:f4", SSP, &ssp0_plat_data); static struct amba_device *amba_devs[] __initdata = { &dmac_device, &uart0_device, &uart1_device, 
&uart2_device, &smc_device, &mpmc_device, &clcd_device, &sctl_device, &wdog_device, &gpio0_device, &gpio1_device, &rtc_device, &sci0_device, &ssp0_device, &aaci_device, &mmc0_device, &kmi0_device, &kmi1_device, }; #ifdef CONFIG_OF /* * Lookup table for attaching a specific name and platform_data pointer to * devices as they get created by of_platform_populate(). Ideally this table * would not exist, but the current clock implementation depends on some devices * having a specific name. */ struct of_dev_auxdata versatile_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI0_BASE, "fpga:05", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_KMI0_BASE, "fpga:06", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_KMI1_BASE, "fpga:07", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART3_BASE, "fpga:09", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_MMCI1_BASE, "fpga:0b", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_CLCD_BASE, "dev:20", &clcd_plat_data), OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART0_BASE, "dev:f1", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART1_BASE, "dev:f2", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_UART2_BASE, "dev:f3", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_SSP_BASE, "dev:f4", NULL), #if 0 /* * These entries are unnecessary because no clocks referencing * them. I've left them in for now as place holders in case * any of them need to be added back, but they should be * removed before actually committing this patch. --gcl */ OF_DEV_AUXDATA("arm,primecell", VERSATILE_AACI_BASE, "fpga:04", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_SCI1_BASE, "fpga:0a", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_SMC_BASE, "dev:00", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_MPMC_BASE, "dev:10", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_DMAC_BASE, "dev:30", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_SCTL_BASE, "dev:e0", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_WATCHDOG_BASE, "dev:e1", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_GPIO0_BASE, "dev:e4", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_GPIO1_BASE, "dev:e5", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_GPIO2_BASE, "dev:e6", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_GPIO3_BASE, "dev:e7", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_RTC_BASE, "dev:e8", NULL), OF_DEV_AUXDATA("arm,primecell", VERSATILE_SCI_BASE, "dev:f0", NULL), #endif {} }; #endif #ifdef CONFIG_LEDS #define VA_LEDS_BASE (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LED_OFFSET) static void versatile_leds_event(led_event_t ledevt) { unsigned long flags; u32 val; local_irq_save(flags); val = readl(VA_LEDS_BASE); switch (ledevt) { case led_idle_start: val = val & ~VERSATILE_SYS_LED0; break; case led_idle_end: val = val | VERSATILE_SYS_LED0; break; case led_timer: val = val ^ VERSATILE_SYS_LED1; break; case led_halted: val = 0; break; default: break; } writel(val, VA_LEDS_BASE); local_irq_restore(flags); } #endif /* CONFIG_LEDS */ /* Early initializations */ void __init versatile_init_early(void) { void __iomem *sys = __io_address(VERSATILE_SYS_BASE); osc4_clk.vcoreg = sys + VERSATILE_SYS_OSCCLCD_OFFSET; clkdev_add_table(lookups, ARRAY_SIZE(lookups)); versatile_sched_clock_init(sys + VERSATILE_SYS_24MHz_OFFSET, 24000000); } void __init versatile_init(void) { int i; platform_device_register(&versatile_flash_device); platform_device_register(&versatile_i2c_device); platform_device_register(&smc91x_device); platform_device_register(&char_lcd_device); for (i = 
0; i < ARRAY_SIZE(amba_devs); i++) { struct amba_device *d = amba_devs[i]; amba_device_register(d, &iomem_resource); } #ifdef CONFIG_LEDS leds_event = versatile_leds_event; #endif } /* * Where is the timer (VA)? */ #define TIMER0_VA_BASE __io_address(VERSATILE_TIMER0_1_BASE) #define TIMER1_VA_BASE (__io_address(VERSATILE_TIMER0_1_BASE) + 0x20) #define TIMER2_VA_BASE __io_address(VERSATILE_TIMER2_3_BASE) #define TIMER3_VA_BASE (__io_address(VERSATILE_TIMER2_3_BASE) + 0x20) /* * Set up timer interrupt, and return the current time in seconds. */ static void __init versatile_timer_init(void) { u32 val; /* * set clock frequency: * VERSATILE_REFCLK is 32KHz * VERSATILE_TIMCLK is 1MHz */ val = readl(__io_address(VERSATILE_SCTL_BASE)); writel((VERSATILE_TIMCLK << VERSATILE_TIMER1_EnSel) | (VERSATILE_TIMCLK << VERSATILE_TIMER2_EnSel) | (VERSATILE_TIMCLK << VERSATILE_TIMER3_EnSel) | (VERSATILE_TIMCLK << VERSATILE_TIMER4_EnSel) | val, __io_address(VERSATILE_SCTL_BASE)); /* * Initialise to a known state (all timers off) */ writel(0, TIMER0_VA_BASE + TIMER_CTRL); writel(0, TIMER1_VA_BASE + TIMER_CTRL); writel(0, TIMER2_VA_BASE + TIMER_CTRL); writel(0, TIMER3_VA_BASE + TIMER_CTRL); sp804_clocksource_init(TIMER3_VA_BASE, "timer3"); sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMERINT0_1, "timer0"); } struct sys_timer versatile_timer = { .init = versatile_timer_init, };
license: gpl-2.0
repo_name: willizambrano/android_kernel_motorola_msm8916
path: arch/x86/kernel/cpu/mtrr/main.c
copies: 1798
size: 20585
content:
/* Generic MTRR (Memory Type Range Register) driver. Copyright (C) 1997-2000 Richard Gooch Copyright (c) 2002 Patrick Mochel This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. Richard Gooch may be reached by email at rgooch@atnf.csiro.au The postal address is: Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. Source: "Pentium Pro Family Developer's Manual, Volume 3: Operating System Writer's Guide" (Intel document number 242692), section 11.11.7 This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> on 6-7 March 2002. Source: Intel Architecture Software Developers Manual, Volume 3: System Programming Guide; Section 9.11. (1997 edition - PPro). */ #define DEBUG #include <linux/types.h> /* FIXME: kvm_para.h needs this */ #include <linux/stop_machine.h> #include <linux/kvm_para.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/sort.h> #include <linux/cpu.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/syscore_ops.h> #include <asm/processor.h> #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" u32 num_var_ranges; unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; static DEFINE_MUTEX(mtrr_mutex); u64 size_or_mask, size_and_mask; static bool mtrr_aps_delayed_init; static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; const struct mtrr_ops *mtrr_if; static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type); void set_mtrr_ops(const struct mtrr_ops *ops) { if (ops->vendor && ops->vendor < X86_VENDOR_NUM) mtrr_ops[ops->vendor] = ops; } /* Returns non-zero if we have the write-combining memory type */ static int have_wrcomb(void) { struct pci_dev *dev; dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL); if (dev != NULL) { /* * ServerWorks LE chipsets < rev 6 have problems with * write-combining. Don't allow it and leave room for other * chipsets to be tagged */ if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && dev->device == PCI_DEVICE_ID_SERVERWORKS_LE && dev->revision <= 5) { pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); pci_dev_put(dev); return 0; } /* * Intel 450NX errata # 23. Non ascending cacheline evictions to * write combining memory may resulting in data corruption */ if (dev->vendor == PCI_VENDOR_ID_INTEL && dev->device == PCI_DEVICE_ID_INTEL_82451NX) { pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); pci_dev_put(dev); return 0; } pci_dev_put(dev); } return mtrr_if->have_wrcomb ? 
mtrr_if->have_wrcomb() : 0; } /* This function returns the number of variable MTRRs */ static void __init set_num_var_ranges(void) { unsigned long config = 0, dummy; if (use_intel()) rdmsr(MSR_MTRRcap, config, dummy); else if (is_cpu(AMD)) config = 2; else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) config = 8; num_var_ranges = config & 0xff; } static void __init init_table(void) { int i, max; max = num_var_ranges; for (i = 0; i < max; i++) mtrr_usage_table[i] = 1; } struct set_mtrr_data { unsigned long smp_base; unsigned long smp_size; unsigned int smp_reg; mtrr_type smp_type; }; /** * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed * by all the CPUs. * @info: pointer to mtrr configuration data * * Returns nothing. */ static int mtrr_rendezvous_handler(void *info) { struct set_mtrr_data *data = info; /* * We use this same function to initialize the mtrrs during boot, * resume, runtime cpu online and on an explicit request to set a * specific MTRR. * * During boot or suspend, the state of the boot cpu's mtrrs has been * saved, and we want to replicate that across all the cpus that come * online (either at the end of boot or resume or during a runtime cpu * online). If we're doing that, @reg is set to something special and on * all the cpu's we do mtrr_if->set_all() (On the logical cpu that * started the boot/resume sequence, this might be a duplicate * set_all()). */ if (data->smp_reg != ~0U) { mtrr_if->set(data->smp_reg, data->smp_base, data->smp_size, data->smp_type); } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) { mtrr_if->set_all(); } return 0; } static inline int types_compatible(mtrr_type type1, mtrr_type type2) { return type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE || (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH); } /** * set_mtrr - update mtrrs on all processors * @reg: mtrr in question * @base: mtrr base * @size: mtrr size * @type: mtrr type * * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: * * 1. Queue work to do the following on all processors: * 2. Disable Interrupts * 3. Wait for all procs to do so * 4. Enter no-fill cache mode * 5. Flush caches * 6. Clear PGE bit * 7. Flush all TLBs * 8. Disable all range registers * 9. Update the MTRRs * 10. Enable all range registers * 11. Flush all TLBs and caches again * 12. Enter normal cache mode and reenable caching * 13. Set PGE * 14. Wait for buddies to catch up * 15. Enable interrupts. * * What does that mean for us? Well, stop_machine() will ensure that * the rendezvous handler is started on each CPU. And in lockstep they * do the state transition of disabling interrupts, updating MTRR's * (the CPU vendors may each do it differently, so we call mtrr_if->set() * callback and let them take care of it.) and enabling interrupts. * * Note that the mechanism is the same for UP systems, too; all the SMP stuff * becomes nops. 
*/ static void set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { struct set_mtrr_data data = { .smp_reg = reg, .smp_base = base, .smp_size = size, .smp_type = type }; stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask); } static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { struct set_mtrr_data data = { .smp_reg = reg, .smp_base = base, .smp_size = size, .smp_type = type }; stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data, cpu_callout_mask); } /** * mtrr_add_page - Add a memory type region * @base: Physical base address of region in pages (in units of 4 kB!) * @size: Physical size of region in pages (4 kB) * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region * * Memory type region registers control the caching on newer Intel and * non Intel processors. This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. On success * the register number for this entry is returned, but should be treated * as a cookie only. * * On a multiprocessor machine the changes are made to all processors. * This is required on x86 by the Intel processors. * * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent. */ int mtrr_add_page(unsigned long base, unsigned long size, unsigned int type, bool increment) { unsigned long lbase, lsize; int i, replace, error; mtrr_type ltype; if (!mtrr_if) return -ENXIO; error = mtrr_if->validate_add_page(base, size, type); if (error) return error; if (type >= MTRR_NUM_TYPES) { pr_warning("mtrr: type: %u invalid\n", type); return -EINVAL; } /* If the type is WC, check that this processor supports it */ if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { pr_warning("mtrr: your processor doesn't support write-combining\n"); return -ENOSYS; } if (!size) { pr_warning("mtrr: zero sized request\n"); return -EINVAL; } if ((base | (base + size - 1)) >> (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) { pr_warning("mtrr: base or size exceeds the MTRR width\n"); return -EINVAL; } error = -EINVAL; replace = -1; /* No CPU hotplug when we change MTRR entries */ get_online_cpus(); /* Search for existing MTRR */ mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) { mtrr_if->get(i, &lbase, &lsize, &ltype); if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) continue; /* * At this point we know there is some kind of * overlap/enclosure */ if (base < lbase || base + size - 1 > lbase + lsize - 1) { if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { /* New region encloses an existing region */ if (type == ltype) { replace = replace == -1 ? 
i : -2; continue; } else if (types_compatible(type, ltype)) continue; } pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing" " 0x%lx000,0x%lx000\n", base, size, lbase, lsize); goto out; } /* New region is enclosed by an existing region */ if (ltype != type) { if (types_compatible(type, ltype)) continue; pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", base, size, mtrr_attrib_to_str(ltype), mtrr_attrib_to_str(type)); goto out; } if (increment) ++mtrr_usage_table[i]; error = i; goto out; } /* Search for an empty MTRR */ i = mtrr_if->get_free_region(base, size, replace); if (i >= 0) { set_mtrr(i, base, size, type); if (likely(replace < 0)) { mtrr_usage_table[i] = 1; } else { mtrr_usage_table[i] = mtrr_usage_table[replace]; if (increment) mtrr_usage_table[i]++; if (unlikely(replace != i)) { set_mtrr(replace, 0, 0, 0); mtrr_usage_table[replace] = 0; } } } else { pr_info("mtrr: no more MTRRs available\n"); } error = i; out: mutex_unlock(&mtrr_mutex); put_online_cpus(); return error; } static int mtrr_check(unsigned long base, unsigned long size) { if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { pr_warning("mtrr: size and base must be multiples of 4 kiB\n"); pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base); dump_stack(); return -1; } return 0; } /** * mtrr_add - Add a memory type region * @base: Physical base address of region * @size: Physical size of region * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region * * Memory type region registers control the caching on newer Intel and * non Intel processors. This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. On success * the register number for this entry is returned, but should be treated * as a cookie only. * * On a multiprocessor machine the changes are made to all processors. * This is required on x86 by the Intel processors. * * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent. */ int mtrr_add(unsigned long base, unsigned long size, unsigned int type, bool increment) { if (mtrr_check(base, size)) return -EINVAL; return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, increment); } EXPORT_SYMBOL(mtrr_add); /** * mtrr_del_page - delete a memory type region * @reg: Register returned by mtrr_add * @base: Physical base address * @size: Size of region * * If register is supplied then base and size are ignored. This is * how drivers should call it. * * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code. 
*/ int mtrr_del_page(int reg, unsigned long base, unsigned long size) { int i, max; mtrr_type ltype; unsigned long lbase, lsize; int error = -EINVAL; if (!mtrr_if) return -ENXIO; max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ get_online_cpus(); mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) { mtrr_if->get(i, &lbase, &lsize, &ltype); if (lbase == base && lsize == size) { reg = i; break; } } if (reg < 0) { pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n", base, size); goto out; } } if (reg >= max) { pr_warning("mtrr: register: %d too big\n", reg); goto out; } mtrr_if->get(reg, &lbase, &lsize, &ltype); if (lsize < 1) { pr_warning("mtrr: MTRR %d not used\n", reg); goto out; } if (mtrr_usage_table[reg] < 1) { pr_warning("mtrr: reg: %d has count=0\n", reg); goto out; } if (--mtrr_usage_table[reg] < 1) set_mtrr(reg, 0, 0, 0); error = reg; out: mutex_unlock(&mtrr_mutex); put_online_cpus(); return error; } /** * mtrr_del - delete a memory type region * @reg: Register returned by mtrr_add * @base: Physical base address * @size: Size of region * * If register is supplied then base and size are ignored. This is * how drivers should call it. * * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code. */ int mtrr_del(int reg, unsigned long base, unsigned long size) { if (mtrr_check(base, size)) return -EINVAL; return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); } EXPORT_SYMBOL(mtrr_del); /* * HACK ALERT! * These should be called implicitly, but we can't yet until all the initcall * stuff is done... */ static void __init init_ifs(void) { #ifndef CONFIG_X86_64 amd_init_mtrr(); cyrix_init_mtrr(); centaur_init_mtrr(); #endif } /* The suspend/resume methods are only for CPU without MTRR. CPU using generic * MTRR driver doesn't require this */ struct mtrr_value { mtrr_type ltype; unsigned long lbase; unsigned long lsize; }; static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; static int mtrr_save(void) { int i; for (i = 0; i < num_var_ranges; i++) { mtrr_if->get(i, &mtrr_value[i].lbase, &mtrr_value[i].lsize, &mtrr_value[i].ltype); } return 0; } static void mtrr_restore(void) { int i; for (i = 0; i < num_var_ranges; i++) { if (mtrr_value[i].lsize) { set_mtrr(i, mtrr_value[i].lbase, mtrr_value[i].lsize, mtrr_value[i].ltype); } } } static struct syscore_ops mtrr_syscore_ops = { .suspend = mtrr_save, .resume = mtrr_restore, }; int __initdata changed_by_mtrr_cleanup; #define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1)) /** * mtrr_bp_init - initialize mtrrs on the boot CPU * * This needs to be called early; before any of the other CPUs are * initialized (i.e. before smp_init()). * */ void __init mtrr_bp_init(void) { u32 phys_addr; init_ifs(); phys_addr = 32; if (cpu_has_mtrr) { mtrr_if = &generic_mtrr_ops; size_or_mask = SIZE_OR_MASK_BITS(36); size_and_mask = 0x00f00000; phys_addr = 36; /* * This is an AMD specific MSR, but we assume(hope?) that * Intel will implement it too when they extend the address * bus of the Xeon. 
*/ if (cpuid_eax(0x80000000) >= 0x80000008) { phys_addr = cpuid_eax(0x80000008) & 0xff; /* CPUID workaround for Intel 0F33/0F34 CPU */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 0x3 && (boot_cpu_data.x86_mask == 0x3 || boot_cpu_data.x86_mask == 0x4)) phys_addr = 36; size_or_mask = SIZE_OR_MASK_BITS(phys_addr); size_and_mask = ~size_or_mask & 0xfffff00000ULL; } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && boot_cpu_data.x86 == 6) { /* * VIA C* family have Intel style MTRRs, * but don't support PAE */ size_or_mask = SIZE_OR_MASK_BITS(32); size_and_mask = 0; phys_addr = 32; } } else { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: if (cpu_has_k6_mtrr) { /* Pre-Athlon (K6) AMD CPU MTRRs */ mtrr_if = mtrr_ops[X86_VENDOR_AMD]; size_or_mask = SIZE_OR_MASK_BITS(32); size_and_mask = 0; } break; case X86_VENDOR_CENTAUR: if (cpu_has_centaur_mcr) { mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR]; size_or_mask = SIZE_OR_MASK_BITS(32); size_and_mask = 0; } break; case X86_VENDOR_CYRIX: if (cpu_has_cyrix_arr) { mtrr_if = mtrr_ops[X86_VENDOR_CYRIX]; size_or_mask = SIZE_OR_MASK_BITS(32); size_and_mask = 0; } break; default: break; } } if (mtrr_if) { set_num_var_ranges(); init_table(); if (use_intel()) { get_mtrr_state(); if (mtrr_cleanup(phys_addr)) { changed_by_mtrr_cleanup = 1; mtrr_if->set_all(); } } } } void mtrr_ap_init(void) { if (!use_intel() || mtrr_aps_delayed_init) return; /* * Ideally we should hold mtrr_mutex here to avoid MTRR entries * being changed, but this routine is called at CPU boot time, * and holding the lock there breaks it. * * This routine is called in two cases: * * 1. very early in software resume, when there are absolutely * no MTRR entry changes; * * 2. CPU hot-add time. We let mtrr_add/del_page hold the cpuhotplug * lock to prevent MTRR entry changes */ set_mtrr_from_inactive_cpu(~0U, 0, 0, 0); } /** * Save current fixed-range MTRR state of the first cpu in cpu_online_mask. */ void mtrr_save_state(void) { int first_cpu; get_online_cpus(); first_cpu = cpumask_first(cpu_online_mask); smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1); put_online_cpus(); } void set_mtrr_aps_delayed_init(void) { if (!use_intel()) return; mtrr_aps_delayed_init = true; } /* * Delayed MTRR initialization for all APs */ void mtrr_aps_init(void) { if (!use_intel()) return; /* * Check if someone has requested the delay of AP MTRR initialization, * by doing set_mtrr_aps_delayed_init(), prior to this point. If not, * then we are done. */ if (!mtrr_aps_delayed_init) return; set_mtrr(~0U, 0, 0, 0); mtrr_aps_delayed_init = false; } void mtrr_bp_restore(void) { if (!use_intel()) return; mtrr_if->set_all(); } static int __init mtrr_init_finialize(void) { if (!mtrr_if) return 0; if (use_intel()) { if (!changed_by_mtrr_cleanup) mtrr_state_warn(); return 0; } /* * These CPUs have no MTRRs and seem not to support SMP. They have * specific drivers; we use a tricky method to support * suspend/resume for them. * * TBD: is there any system with such a CPU which supports * suspend/resume? If not, we should remove the code. */ register_syscore_ops(&mtrr_syscore_ops); return 0; } subsys_initcall(mtrr_init_finialize);
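/*
 * A minimal, hypothetical usage sketch (not part of this file, guarded
 * out with #if 0) of the mtrr_add()/mtrr_del() pattern documented in
 * the kernel-doc above: ask for a write-combining MTRR over a
 * power-of-two sized region on a matching power-of-two boundary, treat
 * the returned register number purely as an opaque cookie, and hand
 * the same base/size back to mtrr_del(). The wc_example_* names and
 * the base/size values are made-up placeholders.
 */
#if 0
#include <linux/mtrr.h>
#include <linux/printk.h>

static int wc_cookie = -1;

static int __init wc_example_init(void)
{
	/* 4 MiB region on a 4 MiB boundary, write-combining */
	wc_cookie = mtrr_add(0xd0000000UL, 0x400000UL,
			     MTRR_TYPE_WRCOMB, true);
	if (wc_cookie < 0)
		pr_info("wc_example: no MTRR available, running uncached\n");
	return 0;
}

static void __exit wc_example_exit(void)
{
	if (wc_cookie >= 0)
		mtrr_del(wc_cookie, 0xd0000000UL, 0x400000UL);
}
#endif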
gpl-2.0
prasidh09/cse506
unionfs-3.10.y/drivers/usb/host/isp116x-hcd.c
1798
44299
/* * ISP116x HCD (Host Controller Driver) for USB. * * Derived from the SL811 HCD, rewritten for ISP116x. * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee> * * Portions: * Copyright (C) 2004 Psion Teklogix (for NetBook PRO) * Copyright (C) 2004 David Brownell * * Periodic scheduling is based on Roman's OHCI code * Copyright (C) 1999 Roman Weissgaerber * */ /* * The driver basically works. A number of people have used it with a range * of devices. * * The driver passes all usbtests 1-14. * * Suspending/resuming of root hub via sysfs works. Remote wakeup works too. * And suspending/resuming of platform device works too. Suspend/resume * via HCD operations vector is not implemented. * * Iso transfer support is not implemented. Adding this would include * implementing recovery from the failure to service the processed ITL * fifo ram in time, which would involve a chip reset. * * TODO: + More testing of suspend/resume. */ /* ISP116x chips require certain delays between accesses to their registers. The following timing options exist. 1. Configure your memory controller (the best) 2. Implement a platform-specific delay function, possibly combined with configuring the memory controller; see include/linux/usb/isp116x.h for more info. Some broken memory controllers, like the LH7A400 SMC, need this. For that to work, also uncomment the USE_PLATFORM_DELAY macro below. 3. Use ndelay (easiest, poorest). For that, uncomment the following USE_NDELAY macro. */ #define USE_PLATFORM_DELAY //#define USE_NDELAY //#define DEBUG //#define VERBOSE /* Transfer descriptors. See dump_ptd() for printout format */ //#define PTD_TRACE /* enqueuing/finishing log of urbs */ //#define URB_TRACE #include <linux/module.h> #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/isp116x.h> #include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include "isp116x.h" #define DRIVER_VERSION "03 Nov 2005" #define DRIVER_DESC "ISP116x USB Host Controller Driver" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char hcd_name[] = "isp116x-hcd"; /*-----------------------------------------------------------------*/ /* Write len bytes to fifo, pad till 32-bit boundary */ static void write_ptddata_to_fifo(struct isp116x *isp116x, void *buf, int len) { u8 *dp = (u8 *) buf; u16 *dp2 = (u16 *) buf; u16 w; int quot = len % 4; /* buffer is already in 'usb data order', which is LE. */ /* When reading buffer as u16, we have to take care byte order */ /* doesn't get mixed up */ if ((unsigned long)dp2 & 1) { /* not aligned */ for (; len > 1; len -= 2) { w = *dp++; w |= *dp++ << 8; isp116x_raw_write_data16(isp116x, w); } if (len) isp116x_write_data16(isp116x, (u16) * dp); } else { /* aligned */ for (; len > 1; len -= 2) { /* Keep byte order ! */ isp116x_raw_write_data16(isp116x, cpu_to_le16(*dp2++)); } if (len) isp116x_write_data16(isp116x, 0xff & *((u8 *) dp2)); } if (quot == 1 || quot == 2) isp116x_raw_write_data16(isp116x, 0); } /* Read len bytes from fifo and then read till 32-bit boundary. */ static void read_ptddata_from_fifo(struct isp116x *isp116x, void *buf, int len) { u8 *dp = (u8 *) buf; u16 *dp2 = (u16 *) buf; u16 w; int quot = len % 4; /* buffer is already in 'usb data order', which is LE. 
*/ /* When reading buffer as u16, we have to take care byte order */ /* doesn't get mixed up */ if ((unsigned long)dp2 & 1) { /* not aligned */ for (; len > 1; len -= 2) { w = isp116x_raw_read_data16(isp116x); *dp++ = w & 0xff; *dp++ = (w >> 8) & 0xff; } if (len) *dp = 0xff & isp116x_read_data16(isp116x); } else { /* aligned */ for (; len > 1; len -= 2) { /* Keep byte order! */ *dp2++ = le16_to_cpu(isp116x_raw_read_data16(isp116x)); } if (len) *(u8 *) dp2 = 0xff & isp116x_read_data16(isp116x); } if (quot == 1 || quot == 2) isp116x_raw_read_data16(isp116x); } /* Write ptd's and data for scheduled transfers into the fifo ram. Fifo must be empty and ready. */ static void pack_fifo(struct isp116x *isp116x) { struct isp116x_ep *ep; struct ptd *ptd; int buflen = isp116x->atl_last_dir == PTD_DIR_IN ? isp116x->atl_bufshrt : isp116x->atl_buflen; isp116x_write_reg16(isp116x, HCuPINT, HCuPINT_AIIEOT); isp116x_write_reg16(isp116x, HCXFERCTR, buflen); isp116x_write_addr(isp116x, HCATLPORT | ISP116x_WRITE_OFFSET); for (ep = isp116x->atl_active; ep; ep = ep->active) { ptd = &ep->ptd; dump_ptd(ptd); dump_ptd_out_data(ptd, ep->data); isp116x_write_data16(isp116x, ptd->count); isp116x_write_data16(isp116x, ptd->mps); isp116x_write_data16(isp116x, ptd->len); isp116x_write_data16(isp116x, ptd->faddr); buflen -= sizeof(struct ptd); /* Skip writing data for last IN PTD */ if (ep->active || (isp116x->atl_last_dir != PTD_DIR_IN)) { write_ptddata_to_fifo(isp116x, ep->data, ep->length); buflen -= ALIGN(ep->length, 4); } } BUG_ON(buflen); } /* Read the processed ptd's and data from fifo ram back to URBs' buffers. Fifo must be full and done */ static void unpack_fifo(struct isp116x *isp116x) { struct isp116x_ep *ep; struct ptd *ptd; int buflen = isp116x->atl_last_dir == PTD_DIR_IN ? isp116x->atl_buflen : isp116x->atl_bufshrt; isp116x_write_reg16(isp116x, HCuPINT, HCuPINT_AIIEOT); isp116x_write_reg16(isp116x, HCXFERCTR, buflen); isp116x_write_addr(isp116x, HCATLPORT); for (ep = isp116x->atl_active; ep; ep = ep->active) { ptd = &ep->ptd; ptd->count = isp116x_read_data16(isp116x); ptd->mps = isp116x_read_data16(isp116x); ptd->len = isp116x_read_data16(isp116x); ptd->faddr = isp116x_read_data16(isp116x); buflen -= sizeof(struct ptd); /* Skip reading data for last Setup or Out PTD */ if (ep->active || (isp116x->atl_last_dir == PTD_DIR_IN)) { read_ptddata_from_fifo(isp116x, ep->data, ep->length); buflen -= ALIGN(ep->length, 4); } dump_ptd(ptd); dump_ptd_in_data(ptd, ep->data); } BUG_ON(buflen); } /*---------------------------------------------------------------*/ /* Set up PTD's. */ static void preproc_atl_queue(struct isp116x *isp116x) { struct isp116x_ep *ep; struct urb *urb; struct ptd *ptd; u16 len; for (ep = isp116x->atl_active; ep; ep = ep->active) { u16 toggle = 0, dir = PTD_DIR_SETUP; BUG_ON(list_empty(&ep->hep->urb_list)); urb = container_of(ep->hep->urb_list.next, struct urb, urb_list); ptd = &ep->ptd; len = ep->length; ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length; switch (ep->nextpid) { case USB_PID_IN: toggle = usb_gettoggle(urb->dev, ep->epnum, 0); dir = PTD_DIR_IN; break; case USB_PID_OUT: toggle = usb_gettoggle(urb->dev, ep->epnum, 1); dir = PTD_DIR_OUT; break; case USB_PID_SETUP: len = sizeof(struct usb_ctrlrequest); ep->data = urb->setup_packet; break; case USB_PID_ACK: toggle = 1; len = 0; dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ? 
PTD_DIR_OUT : PTD_DIR_IN; break; default: ERR("%s %d: ep->nextpid %d\n", __func__, __LINE__, ep->nextpid); BUG(); } ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle); ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) | PTD_EP(ep->epnum); ptd->len = PTD_LEN(len) | PTD_DIR(dir); ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); if (!ep->active) { ptd->mps |= PTD_LAST_MSK; isp116x->atl_last_dir = dir; } isp116x->atl_bufshrt = sizeof(struct ptd) + isp116x->atl_buflen; isp116x->atl_buflen = isp116x->atl_bufshrt + ALIGN(len, 4); } } /* Take done or failed requests out of schedule. Give back processed urbs. */ static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep, struct urb *urb, int status) __releases(isp116x->lock) __acquires(isp116x->lock) { unsigned i; ep->error_count = 0; if (usb_pipecontrol(urb->pipe)) ep->nextpid = USB_PID_SETUP; urb_dbg(urb, "Finish"); usb_hcd_unlink_urb_from_ep(isp116x_to_hcd(isp116x), urb); spin_unlock(&isp116x->lock); usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb, status); spin_lock(&isp116x->lock); /* take idle endpoints out of the schedule */ if (!list_empty(&ep->hep->urb_list)) return; /* async deschedule */ if (!list_empty(&ep->schedule)) { list_del_init(&ep->schedule); return; } /* periodic deschedule */ DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { struct isp116x_ep *temp; struct isp116x_ep **prev = &isp116x->periodic[i]; while (*prev && ((temp = *prev) != ep)) prev = &temp->next; if (*prev) *prev = ep->next; isp116x->load[i] -= ep->load; } ep->branch = PERIODIC_SIZE; isp116x_to_hcd(isp116x)->self.bandwidth_allocated -= ep->load / ep->period; /* switch irq type? */ if (!--isp116x->periodic_count) { isp116x->irqenb &= ~HCuPINT_SOF; isp116x->irqenb |= HCuPINT_ATL; } } /* Analyze transfer results, handle partial transfers and errors */ static void postproc_atl_queue(struct isp116x *isp116x) { struct isp116x_ep *ep; struct urb *urb; struct usb_device *udev; struct ptd *ptd; int short_not_ok; int status; u8 cc; for (ep = isp116x->atl_active; ep; ep = ep->active) { BUG_ON(list_empty(&ep->hep->urb_list)); urb = container_of(ep->hep->urb_list.next, struct urb, urb_list); udev = urb->dev; ptd = &ep->ptd; cc = PTD_GET_CC(ptd); short_not_ok = 1; status = -EINPROGRESS; /* Data underrun is special. For allowed underrun we clear the error and continue as normal. For forbidden underrun we finish the DATA stage immediately while for control transfer, we do a STATUS stage. */ if (cc == TD_DATAUNDERRUN) { if (!(urb->transfer_flags & URB_SHORT_NOT_OK) || usb_pipecontrol(urb->pipe)) { DBG("Allowed or control data underrun\n"); cc = TD_CC_NOERROR; short_not_ok = 0; } else { ep->error_count = 1; usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, PTD_GET_TOGGLE(ptd)); urb->actual_length += PTD_GET_COUNT(ptd); status = cc_to_error[TD_DATAUNDERRUN]; goto done; } } if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED && (++ep->error_count >= 3 || cc == TD_CC_STALL || cc == TD_DATAOVERRUN)) { status = cc_to_error[cc]; if (ep->nextpid == USB_PID_ACK) ep->nextpid = 0; goto done; } /* According to usb spec, zero-length Int transfer signals finishing of the urb. Hey, does this apply only for IN endpoints? 
*/ if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) { status = 0; goto done; } /* Relax after a previously failed, but later succeeded or correctly NAK'ed retransmission attempt */ if (ep->error_count && (cc == TD_CC_NOERROR || cc == TD_NOTACCESSED)) ep->error_count = 0; /* Take into account idiosyncrasies of the isp116x chip regarding toggle bit for failed transfers */ if (ep->nextpid == USB_PID_OUT) usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd) ^ (ep->error_count > 0)); else if (ep->nextpid == USB_PID_IN) usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd) ^ (ep->error_count > 0)); switch (ep->nextpid) { case USB_PID_IN: case USB_PID_OUT: urb->actual_length += PTD_GET_COUNT(ptd); if (PTD_GET_ACTIVE(ptd) || (cc != TD_CC_NOERROR && cc < 0x0E)) break; if (urb->transfer_buffer_length != urb->actual_length) { if (short_not_ok) break; } else { if (urb->transfer_flags & URB_ZERO_PACKET && ep->nextpid == USB_PID_OUT && !(PTD_GET_COUNT(ptd) % ep->maxpacket)) { DBG("Zero packet requested\n"); break; } } /* All data for this URB is transferred, let's finish */ if (usb_pipecontrol(urb->pipe)) ep->nextpid = USB_PID_ACK; else status = 0; break; case USB_PID_SETUP: if (PTD_GET_ACTIVE(ptd) || (cc != TD_CC_NOERROR && cc < 0x0E)) break; if (urb->transfer_buffer_length == urb->actual_length) ep->nextpid = USB_PID_ACK; else if (usb_pipeout(urb->pipe)) { usb_settoggle(udev, 0, 1, 1); ep->nextpid = USB_PID_OUT; } else { usb_settoggle(udev, 0, 0, 1); ep->nextpid = USB_PID_IN; } break; case USB_PID_ACK: if (PTD_GET_ACTIVE(ptd) || (cc != TD_CC_NOERROR && cc < 0x0E)) break; status = 0; ep->nextpid = 0; break; default: BUG(); } done: if (status != -EINPROGRESS || urb->unlinked) finish_request(isp116x, ep, urb, status); } } /* Scan transfer lists, schedule transfers, send data off to chip. */ static void start_atl_transfers(struct isp116x *isp116x) { struct isp116x_ep *last_ep = NULL, *ep; struct urb *urb; u16 load = 0; int len, index, speed, byte_time; if (atomic_read(&isp116x->atl_finishing)) return; if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state)) return; /* FIFO not empty? */ if (isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_FULL) return; isp116x->atl_active = NULL; isp116x->atl_buflen = isp116x->atl_bufshrt = 0; /* Schedule int transfers */ if (isp116x->periodic_count) { isp116x->fmindex = index = (isp116x->fmindex + 1) & (PERIODIC_SIZE - 1); if ((load = isp116x->load[index])) { /* Bring all int transfers for this frame into the active queue */ isp116x->atl_active = last_ep = isp116x->periodic[index]; while (last_ep->next) last_ep = (last_ep->active = last_ep->next); last_ep->active = NULL; } } /* Schedule control/bulk transfers */ list_for_each_entry(ep, &isp116x->async, schedule) { urb = container_of(ep->hep->urb_list.next, struct urb, urb_list); speed = urb->dev->speed; byte_time = speed == USB_SPEED_LOW ? BYTE_TIME_LOWSPEED : BYTE_TIME_FULLSPEED; if (ep->nextpid == USB_PID_SETUP) { len = sizeof(struct usb_ctrlrequest); } else if (ep->nextpid == USB_PID_ACK) { len = 0; } else { /* Find current free length ... */ len = (MAX_LOAD_LIMIT - load) / byte_time; /* ... then limit it to configured max size ... */ len = min(len, speed == USB_SPEED_LOW ? MAX_TRANSFER_SIZE_LOWSPEED : MAX_TRANSFER_SIZE_FULLSPEED); /* ... and finally cut to the multiple of MaxPacketSize, or to the real length if there's enough room. 
*/ if (len < (urb->transfer_buffer_length - urb->actual_length)) { len -= len % ep->maxpacket; if (!len) continue; } else len = urb->transfer_buffer_length - urb->actual_length; BUG_ON(len < 0); } load += len * byte_time; if (load > MAX_LOAD_LIMIT) break; ep->active = NULL; ep->length = len; if (last_ep) last_ep->active = ep; else isp116x->atl_active = ep; last_ep = ep; } /* Avoid starving of endpoints */ if ((&isp116x->async)->next != (&isp116x->async)->prev) list_move(&isp116x->async, (&isp116x->async)->next); if (isp116x->atl_active) { preproc_atl_queue(isp116x); pack_fifo(isp116x); } } /* Finish the processed transfers */ static void finish_atl_transfers(struct isp116x *isp116x) { if (!isp116x->atl_active) return; /* Fifo not ready? */ if (!(isp116x_read_reg16(isp116x, HCBUFSTAT) & HCBUFSTAT_ATL_DONE)) return; atomic_inc(&isp116x->atl_finishing); unpack_fifo(isp116x); postproc_atl_queue(isp116x); atomic_dec(&isp116x->atl_finishing); } static irqreturn_t isp116x_irq(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); u16 irqstat; irqreturn_t ret = IRQ_NONE; spin_lock(&isp116x->lock); isp116x_write_reg16(isp116x, HCuPINTENB, 0); irqstat = isp116x_read_reg16(isp116x, HCuPINT); isp116x_write_reg16(isp116x, HCuPINT, irqstat); if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) { ret = IRQ_HANDLED; finish_atl_transfers(isp116x); } if (irqstat & HCuPINT_OPR) { u32 intstat = isp116x_read_reg32(isp116x, HCINTSTAT); isp116x_write_reg32(isp116x, HCINTSTAT, intstat); if (intstat & HCINT_UE) { ERR("Unrecoverable error, HC is dead!\n"); /* IRQ's are off, we do no DMA, perfectly ready to die ... */ hcd->state = HC_STATE_HALT; usb_hc_died(hcd); ret = IRQ_HANDLED; goto done; } if (intstat & HCINT_RHSC) /* When root hub or any of its ports is going to come out of suspend, it may take more than 10ms for status bits to stabilize. */ mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(20) + 1); if (intstat & HCINT_RD) { DBG("---- remote wakeup\n"); usb_hcd_resume_root_hub(hcd); } irqstat &= ~HCuPINT_OPR; ret = IRQ_HANDLED; } if (irqstat & (HCuPINT_ATL | HCuPINT_SOF)) { start_atl_transfers(isp116x); } isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb); done: spin_unlock(&isp116x->lock); return ret; } /*-----------------------------------------------------------------*/ /* usb 1.1 says max 90% of a frame is available for periodic transfers. * this driver doesn't promise that much since it's got to handle an * IRQ per packet; irq handling latencies also use up that time. */ /* out of 1000 us */ #define MAX_PERIODIC_LOAD 600 static int balance(struct isp116x *isp116x, u16 period, u16 load) { int i, branch = -ENOSPC; /* search for the least loaded schedule branch of that period which has enough bandwidth left unreserved. */ for (i = 0; i < period; i++) { if (branch < 0 || isp116x->load[branch] > isp116x->load[i]) { int j; for (j = i; j < PERIODIC_SIZE; j += period) { if ((isp116x->load[j] + load) > MAX_PERIODIC_LOAD) break; } if (j < PERIODIC_SIZE) continue; branch = i; } } return branch; } /* NB! 
ALL the code above this point runs with isp116x->lock held, irqs off */ /*-----------------------------------------------------------------*/ static int isp116x_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct isp116x *isp116x = hcd_to_isp116x(hcd); struct usb_device *udev = urb->dev; unsigned int pipe = urb->pipe; int is_out = !usb_pipein(pipe); int type = usb_pipetype(pipe); int epnum = usb_pipeendpoint(pipe); struct usb_host_endpoint *hep = urb->ep; struct isp116x_ep *ep = NULL; unsigned long flags; int i; int ret = 0; urb_dbg(urb, "Enqueue"); if (type == PIPE_ISOCHRONOUS) { ERR("Isochronous transfers not supported\n"); urb_dbg(urb, "Refused to enqueue"); return -ENXIO; } /* avoid all allocations within spinlocks: request or endpoint */ if (!hep->hcpriv) { ep = kzalloc(sizeof *ep, mem_flags); if (!ep) return -ENOMEM; } spin_lock_irqsave(&isp116x->lock, flags); if (!HC_IS_RUNNING(hcd->state)) { kfree(ep); ret = -ENODEV; goto fail_not_linked; } ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) { kfree(ep); goto fail_not_linked; } if (hep->hcpriv) ep = hep->hcpriv; else { INIT_LIST_HEAD(&ep->schedule); ep->udev = udev; ep->epnum = epnum; ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out); usb_settoggle(udev, epnum, is_out, 0); if (type == PIPE_CONTROL) { ep->nextpid = USB_PID_SETUP; } else if (is_out) { ep->nextpid = USB_PID_OUT; } else { ep->nextpid = USB_PID_IN; } if (urb->interval) { /* With INT URBs submitted, the driver works with SOF interrupt enabled and ATL interrupt disabled. After the PTDs are written to fifo ram, the chip starts fifo processing and usb transfers after the next SOF and continues until the transfers are finished (succeeded or failed) or the frame ends. Therefore, the transfers occur only in every second frame, while fifo reading/writing and data processing occur in every other second frame. 
*/ if (urb->interval < 2) urb->interval = 2; if (urb->interval > 2 * PERIODIC_SIZE) urb->interval = 2 * PERIODIC_SIZE; ep->period = urb->interval >> 1; ep->branch = PERIODIC_SIZE; ep->load = usb_calc_bus_time(udev->speed, !is_out, (type == PIPE_ISOCHRONOUS), usb_maxpacket(udev, pipe, is_out)) / 1000; } hep->hcpriv = ep; ep->hep = hep; } /* maybe put endpoint into schedule */ switch (type) { case PIPE_CONTROL: case PIPE_BULK: if (list_empty(&ep->schedule)) list_add_tail(&ep->schedule, &isp116x->async); break; case PIPE_INTERRUPT: urb->interval = ep->period; ep->length = min_t(u32, ep->maxpacket, urb->transfer_buffer_length); /* urb submitted for already existing endpoint */ if (ep->branch < PERIODIC_SIZE) break; ep->branch = ret = balance(isp116x, ep->period, ep->load); if (ret < 0) goto fail; ret = 0; urb->start_frame = (isp116x->fmindex & (PERIODIC_SIZE - 1)) + ep->branch; /* sort each schedule branch by period (slow before fast) to share the faster parts of the tree without needing dummy/placeholder nodes */ DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch); for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) { struct isp116x_ep **prev = &isp116x->periodic[i]; struct isp116x_ep *here = *prev; while (here && ep != here) { if (ep->period > here->period) break; prev = &here->next; here = *prev; } if (ep != here) { ep->next = here; *prev = ep; } isp116x->load[i] += ep->load; } hcd->self.bandwidth_allocated += ep->load / ep->period; /* switch over to SOFint */ if (!isp116x->periodic_count++) { isp116x->irqenb &= ~HCuPINT_ATL; isp116x->irqenb |= HCuPINT_SOF; isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb); } } urb->hcpriv = hep; start_atl_transfers(isp116x); fail: if (ret) usb_hcd_unlink_urb_from_ep(hcd, urb); fail_not_linked: spin_unlock_irqrestore(&isp116x->lock, flags); return ret; } /* Dequeue URBs. */ static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct isp116x *isp116x = hcd_to_isp116x(hcd); struct usb_host_endpoint *hep; struct isp116x_ep *ep, *ep_act; unsigned long flags; int rc; spin_lock_irqsave(&isp116x->lock, flags); rc = usb_hcd_check_unlink_urb(hcd, urb, status); if (rc) goto done; hep = urb->hcpriv; ep = hep->hcpriv; WARN_ON(hep != ep->hep); /* In front of queue? */ if (ep->hep->urb_list.next == &urb->urb_list) /* active? */ for (ep_act = isp116x->atl_active; ep_act; ep_act = ep_act->active) if (ep_act == ep) { VDBG("dequeue, urb %p active; wait for irq\n", urb); urb = NULL; break; } if (urb) finish_request(isp116x, ep, urb, status); done: spin_unlock_irqrestore(&isp116x->lock, flags); return rc; } static void isp116x_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) { int i; struct isp116x_ep *ep = hep->hcpriv; if (!ep) return; /* assume we'd just wait for the irq */ for (i = 0; i < 100 && !list_empty(&hep->urb_list); i++) msleep(3); if (!list_empty(&hep->urb_list)) WARNING("ep %p not empty?\n", ep); kfree(ep); hep->hcpriv = NULL; } static int isp116x_get_frame(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); u32 fmnum; unsigned long flags; spin_lock_irqsave(&isp116x->lock, flags); fmnum = isp116x_read_reg32(isp116x, HCFMNUM); spin_unlock_irqrestore(&isp116x->lock, flags); return (int)fmnum; } /* Adapted from ohci-hub.c. Currently we don't support autosuspend. 
*/ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf) { struct isp116x *isp116x = hcd_to_isp116x(hcd); int ports, i, changed = 0; unsigned long flags; if (!HC_IS_RUNNING(hcd->state)) return -ESHUTDOWN; /* Report no status change now if we are scheduled to be called later */ if (timer_pending(&hcd->rh_timer)) return 0; ports = isp116x->rhdesca & RH_A_NDP; spin_lock_irqsave(&isp116x->lock, flags); isp116x->rhstatus = isp116x_read_reg32(isp116x, HCRHSTATUS); if (isp116x->rhstatus & (RH_HS_LPSC | RH_HS_OCIC)) buf[0] = changed = 1; else buf[0] = 0; for (i = 0; i < ports; i++) { u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) { changed = 1; buf[0] |= 1 << (i + 1); } } spin_unlock_irqrestore(&isp116x->lock, flags); return changed; } static void isp116x_hub_descriptor(struct isp116x *isp116x, struct usb_hub_descriptor *desc) { u32 reg = isp116x->rhdesca; desc->bDescriptorType = 0x29; desc->bDescLength = 9; desc->bHubContrCurrent = 0; desc->bNbrPorts = (u8) (reg & 0x3); /* Power switching, device type, overcurrent. */ desc->wHubCharacteristics = cpu_to_le16((u16) ((reg >> 8) & 0x1f)); desc->bPwrOn2PwrGood = (u8) ((reg >> 24) & 0xff); /* ports removable, and legacy PortPwrCtrlMask */ desc->u.hs.DeviceRemovable[0] = 0; desc->u.hs.DeviceRemovable[1] = ~0; } /* Perform reset of a given port. It would be great to just start the reset and let the USB core clear the reset in due time. However, root hub ports should be reset for at least 50 ms, while our chip stays in reset for about 10 ms. I.e., we must repeatedly reset it ourselves here. */ static inline void root_port_reset(struct isp116x *isp116x, unsigned port) { u32 tmp; unsigned long flags, t; /* Root hub reset should be 50 ms, but some devices want it even longer. */ t = jiffies + msecs_to_jiffies(100); while (time_before(jiffies, t)) { spin_lock_irqsave(&isp116x->lock, flags); /* spin until any current reset finishes */ for (;;) { tmp = isp116x_read_reg32(isp116x, port ? HCRHPORT2 : HCRHPORT1); if (!(tmp & RH_PS_PRS)) break; udelay(500); } /* Don't reset a disconnected port */ if (!(tmp & RH_PS_CCS)) { spin_unlock_irqrestore(&isp116x->lock, flags); break; } /* Reset lasts 10 ms (so the datasheet claims) */ isp116x_write_reg32(isp116x, port ? 
HCRHPORT2 : HCRHPORT1, (RH_PS_PRS)); spin_unlock_irqrestore(&isp116x->lock, flags); msleep(10); } } /* Adapted from ohci-hub.c */ static int isp116x_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct isp116x *isp116x = hcd_to_isp116x(hcd); int ret = 0; unsigned long flags; int ports = isp116x->rhdesca & RH_A_NDP; u32 tmp = 0; switch (typeReq) { case ClearHubFeature: DBG("ClearHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: DBG("C_HUB_OVER_CURRENT\n"); spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC); spin_unlock_irqrestore(&isp116x->lock, flags); case C_HUB_LOCAL_POWER: DBG("C_HUB_LOCAL_POWER\n"); break; default: goto error; } break; case SetHubFeature: DBG("SetHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: DBG("C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); break; default: goto error; } break; case GetHubDescriptor: DBG("GetHubDescriptor\n"); isp116x_hub_descriptor(isp116x, (struct usb_hub_descriptor *)buf); break; case GetHubStatus: DBG("GetHubStatus\n"); *(__le32 *) buf = 0; break; case GetPortStatus: DBG("GetPortStatus\n"); if (!wIndex || wIndex > ports) goto error; spin_lock_irqsave(&isp116x->lock, flags); tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1); spin_unlock_irqrestore(&isp116x->lock, flags); *(__le32 *) buf = cpu_to_le32(tmp); DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); break; case ClearPortFeature: DBG("ClearPortFeature: "); if (!wIndex || wIndex > ports) goto error; wIndex--; switch (wValue) { case USB_PORT_FEAT_ENABLE: DBG("USB_PORT_FEAT_ENABLE\n"); tmp = RH_PS_CCS; break; case USB_PORT_FEAT_C_ENABLE: DBG("USB_PORT_FEAT_C_ENABLE\n"); tmp = RH_PS_PESC; break; case USB_PORT_FEAT_SUSPEND: DBG("USB_PORT_FEAT_SUSPEND\n"); tmp = RH_PS_POCI; break; case USB_PORT_FEAT_C_SUSPEND: DBG("USB_PORT_FEAT_C_SUSPEND\n"); tmp = RH_PS_PSSC; break; case USB_PORT_FEAT_POWER: DBG("USB_PORT_FEAT_POWER\n"); tmp = RH_PS_LSDA; break; case USB_PORT_FEAT_C_CONNECTION: DBG("USB_PORT_FEAT_C_CONNECTION\n"); tmp = RH_PS_CSC; break; case USB_PORT_FEAT_C_OVER_CURRENT: DBG("USB_PORT_FEAT_C_OVER_CURRENT\n"); tmp = RH_PS_OCIC; break; case USB_PORT_FEAT_C_RESET: DBG("USB_PORT_FEAT_C_RESET\n"); tmp = RH_PS_PRSC; break; default: goto error; } spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1, tmp); spin_unlock_irqrestore(&isp116x->lock, flags); break; case SetPortFeature: DBG("SetPortFeature: "); if (!wIndex || wIndex > ports) goto error; wIndex--; switch (wValue) { case USB_PORT_FEAT_SUSPEND: DBG("USB_PORT_FEAT_SUSPEND\n"); spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); spin_unlock_irqrestore(&isp116x->lock, flags); break; case USB_PORT_FEAT_POWER: DBG("USB_PORT_FEAT_POWER\n"); spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); spin_unlock_irqrestore(&isp116x->lock, flags); break; case USB_PORT_FEAT_RESET: DBG("USB_PORT_FEAT_RESET\n"); root_port_reset(isp116x, wIndex); break; default: goto error; } break; default: error: /* "protocol stall" on error */ DBG("PROTOCOL STALL\n"); ret = -EPIPE; } return ret; } /*-----------------------------------------------------------------*/ #ifdef CONFIG_DEBUG_FS static void dump_irq(struct seq_file *s, char *label, u16 mask) { seq_printf(s, "%s %04x%s%s%s%s%s%s\n", label, mask, mask & HCuPINT_CLKRDY ? 
" clkrdy" : "", mask & HCuPINT_SUSP ? " susp" : "", mask & HCuPINT_OPR ? " opr" : "", mask & HCuPINT_AIIEOT ? " eot" : "", mask & HCuPINT_ATL ? " atl" : "", mask & HCuPINT_SOF ? " sof" : ""); } static void dump_int(struct seq_file *s, char *label, u32 mask) { seq_printf(s, "%s %08x%s%s%s%s%s%s%s\n", label, mask, mask & HCINT_MIE ? " MIE" : "", mask & HCINT_RHSC ? " rhsc" : "", mask & HCINT_FNO ? " fno" : "", mask & HCINT_UE ? " ue" : "", mask & HCINT_RD ? " rd" : "", mask & HCINT_SF ? " sof" : "", mask & HCINT_SO ? " so" : ""); } static int isp116x_show_dbg(struct seq_file *s, void *unused) { struct isp116x *isp116x = s->private; seq_printf(s, "%s\n%s version %s\n", isp116x_to_hcd(isp116x)->product_desc, hcd_name, DRIVER_VERSION); if (HC_IS_SUSPENDED(isp116x_to_hcd(isp116x)->state)) { seq_printf(s, "HCD is suspended\n"); return 0; } if (!HC_IS_RUNNING(isp116x_to_hcd(isp116x)->state)) { seq_printf(s, "HCD not running\n"); return 0; } spin_lock_irq(&isp116x->lock); dump_irq(s, "hc_irq_enable", isp116x_read_reg16(isp116x, HCuPINTENB)); dump_irq(s, "hc_irq_status", isp116x_read_reg16(isp116x, HCuPINT)); dump_int(s, "hc_int_enable", isp116x_read_reg32(isp116x, HCINTENB)); dump_int(s, "hc_int_status", isp116x_read_reg32(isp116x, HCINTSTAT)); isp116x_show_regs_seq(isp116x, s); spin_unlock_irq(&isp116x->lock); seq_printf(s, "\n"); return 0; } static int isp116x_open_seq(struct inode *inode, struct file *file) { return single_open(file, isp116x_show_dbg, inode->i_private); } static const struct file_operations isp116x_debug_fops = { .open = isp116x_open_seq, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int create_debug_file(struct isp116x *isp116x) { isp116x->dentry = debugfs_create_file(hcd_name, S_IRUGO, NULL, isp116x, &isp116x_debug_fops); if (!isp116x->dentry) return -ENOMEM; return 0; } static void remove_debug_file(struct isp116x *isp116x) { debugfs_remove(isp116x->dentry); } #else #define create_debug_file(d) 0 #define remove_debug_file(d) do{}while(0) #endif /* CONFIG_DEBUG_FS */ /*-----------------------------------------------------------------*/ /* Software reset - can be called from any context. */ static int isp116x_sw_reset(struct isp116x *isp116x) { int retries = 15; unsigned long flags; int ret = 0; spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg16(isp116x, HCSWRES, HCSWRES_MAGIC); isp116x_write_reg32(isp116x, HCCMDSTAT, HCCMDSTAT_HCR); while (--retries) { /* It usually resets within 1 ms */ mdelay(1); if (!(isp116x_read_reg32(isp116x, HCCMDSTAT) & HCCMDSTAT_HCR)) break; } if (!retries) { ERR("Software reset timeout\n"); ret = -ETIME; } spin_unlock_irqrestore(&isp116x->lock, flags); return ret; } static int isp116x_reset(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); unsigned long t; u16 clkrdy = 0; int ret, timeout = 15 /* ms */ ; ret = isp116x_sw_reset(isp116x); if (ret) return ret; t = jiffies + msecs_to_jiffies(timeout); while (time_before_eq(jiffies, t)) { msleep(4); spin_lock_irq(&isp116x->lock); clkrdy = isp116x_read_reg16(isp116x, HCuPINT) & HCuPINT_CLKRDY; spin_unlock_irq(&isp116x->lock); if (clkrdy) break; } if (!clkrdy) { ERR("Clock not ready after %dms\n", timeout); /* After sw_reset the clock won't report ready if the H_WAKEUP pin is high. 
*/ ERR("Please make sure that the H_WAKEUP pin is pulled low!\n"); ret = -ENODEV; } return ret; } static void isp116x_stop(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); unsigned long flags; u32 val; spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg16(isp116x, HCuPINTENB, 0); /* Switch off ports' power, some devices don't come up after next 'insmod' without this */ val = isp116x_read_reg32(isp116x, HCRHDESCA); val &= ~(RH_A_NPS | RH_A_PSM); isp116x_write_reg32(isp116x, HCRHDESCA, val); isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_LPS); spin_unlock_irqrestore(&isp116x->lock, flags); isp116x_sw_reset(isp116x); } /* Configure the chip. The chip must be successfully reset by now. */ static int isp116x_start(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); struct isp116x_platform_data *board = isp116x->board; u32 val; unsigned long flags; spin_lock_irqsave(&isp116x->lock, flags); /* clear interrupt status and disable all interrupt sources */ isp116x_write_reg16(isp116x, HCuPINT, 0xff); isp116x_write_reg16(isp116x, HCuPINTENB, 0); val = isp116x_read_reg16(isp116x, HCCHIPID); if ((val & HCCHIPID_MASK) != HCCHIPID_MAGIC) { ERR("Invalid chip ID %04x\n", val); spin_unlock_irqrestore(&isp116x->lock, flags); return -ENODEV; } /* To be removed in future */ hcd->uses_new_polling = 1; isp116x_write_reg16(isp116x, HCITLBUFLEN, ISP116x_ITL_BUFSIZE); isp116x_write_reg16(isp116x, HCATLBUFLEN, ISP116x_ATL_BUFSIZE); /* ----- HW conf */ val = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1); if (board->sel15Kres) val |= HCHWCFG_15KRSEL; /* Remote wakeup won't work without working clock */ if (board->remote_wakeup_enable) val |= HCHWCFG_CLKNOTSTOP; if (board->oc_enable) val |= HCHWCFG_ANALOG_OC; if (board->int_act_high) val |= HCHWCFG_INT_POL; if (board->int_edge_triggered) val |= HCHWCFG_INT_TRIGGER; isp116x_write_reg16(isp116x, HCHWCFG, val); /* ----- Root hub conf */ val = (25 << 24) & RH_A_POTPGT; /* AN10003_1.pdf recommends RH_A_NPS (no power switching) to be always set. Yet, instead, we request individual port power switching. 
*/ val |= RH_A_PSM; /* Report overcurrent per port */ val |= RH_A_OCPM; isp116x_write_reg32(isp116x, HCRHDESCA, val); isp116x->rhdesca = isp116x_read_reg32(isp116x, HCRHDESCA); val = RH_B_PPCM; isp116x_write_reg32(isp116x, HCRHDESCB, val); isp116x->rhdescb = isp116x_read_reg32(isp116x, HCRHDESCB); val = 0; if (board->remote_wakeup_enable) { if (!device_can_wakeup(hcd->self.controller)) device_init_wakeup(hcd->self.controller, 1); val |= RH_HS_DRWE; } isp116x_write_reg32(isp116x, HCRHSTATUS, val); isp116x->rhstatus = isp116x_read_reg32(isp116x, HCRHSTATUS); isp116x_write_reg32(isp116x, HCFMINTVL, 0x27782edf); hcd->state = HC_STATE_RUNNING; /* Set up interrupts */ isp116x->intenb = HCINT_MIE | HCINT_RHSC | HCINT_UE; if (board->remote_wakeup_enable) isp116x->intenb |= HCINT_RD; isp116x->irqenb = HCuPINT_ATL | HCuPINT_OPR; /* | HCuPINT_SUSP; */ isp116x_write_reg32(isp116x, HCINTENB, isp116x->intenb); isp116x_write_reg16(isp116x, HCuPINTENB, isp116x->irqenb); /* Go operational */ val = HCCONTROL_USB_OPER; if (board->remote_wakeup_enable) val |= HCCONTROL_RWE; isp116x_write_reg32(isp116x, HCCONTROL, val); /* Disable ports to avoid race in device enumeration */ isp116x_write_reg32(isp116x, HCRHPORT1, RH_PS_CCS); isp116x_write_reg32(isp116x, HCRHPORT2, RH_PS_CCS); isp116x_show_regs_log(isp116x); spin_unlock_irqrestore(&isp116x->lock, flags); return 0; } #ifdef CONFIG_PM static int isp116x_bus_suspend(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); unsigned long flags; u32 val; int ret = 0; spin_lock_irqsave(&isp116x->lock, flags); val = isp116x_read_reg32(isp116x, HCCONTROL); switch (val & HCCONTROL_HCFS) { case HCCONTROL_USB_OPER: spin_unlock_irqrestore(&isp116x->lock, flags); val &= (~HCCONTROL_HCFS & ~HCCONTROL_RWE); val |= HCCONTROL_USB_SUSPEND; if (hcd->self.root_hub->do_remote_wakeup) val |= HCCONTROL_RWE; /* Wait for usb transfers to finish */ msleep(2); spin_lock_irqsave(&isp116x->lock, flags); isp116x_write_reg32(isp116x, HCCONTROL, val); spin_unlock_irqrestore(&isp116x->lock, flags); /* Wait for devices to suspend */ msleep(5); break; case HCCONTROL_USB_RESUME: isp116x_write_reg32(isp116x, HCCONTROL, (val & ~HCCONTROL_HCFS) | HCCONTROL_USB_RESET); case HCCONTROL_USB_RESET: ret = -EBUSY; default: /* HCCONTROL_USB_SUSPEND */ spin_unlock_irqrestore(&isp116x->lock, flags); break; } return ret; } static int isp116x_bus_resume(struct usb_hcd *hcd) { struct isp116x *isp116x = hcd_to_isp116x(hcd); u32 val; msleep(5); spin_lock_irq(&isp116x->lock); val = isp116x_read_reg32(isp116x, HCCONTROL); switch (val & HCCONTROL_HCFS) { case HCCONTROL_USB_SUSPEND: val &= ~HCCONTROL_HCFS; val |= HCCONTROL_USB_RESUME; isp116x_write_reg32(isp116x, HCCONTROL, val); case HCCONTROL_USB_RESUME: break; case HCCONTROL_USB_OPER: spin_unlock_irq(&isp116x->lock); return 0; default: /* HCCONTROL_USB_RESET: this may happen, when during suspension the HC lost power. Reinitialize completely */ spin_unlock_irq(&isp116x->lock); DBG("Chip has been reset while suspended. Reinit from scratch.\n"); isp116x_reset(hcd); isp116x_start(hcd); isp116x_hub_control(hcd, SetPortFeature, USB_PORT_FEAT_POWER, 1, NULL, 0); if ((isp116x->rhdesca & RH_A_NDP) == 2) isp116x_hub_control(hcd, SetPortFeature, USB_PORT_FEAT_POWER, 2, NULL, 0); return 0; } val = isp116x->rhdesca & RH_A_NDP; while (val--) { u32 stat = isp116x_read_reg32(isp116x, val ? 
HCRHPORT2 : HCRHPORT1); /* force global, not selective, resume */ if (!(stat & RH_PS_PSS)) continue; DBG("%s: Resuming port %d\n", __func__, val); isp116x_write_reg32(isp116x, val ? HCRHPORT2 : HCRHPORT1, RH_PS_POCI); } spin_unlock_irq(&isp116x->lock); hcd->state = HC_STATE_RESUMING; msleep(20); /* Go operational */ spin_lock_irq(&isp116x->lock); val = isp116x_read_reg32(isp116x, HCCONTROL); isp116x_write_reg32(isp116x, HCCONTROL, (val & ~HCCONTROL_HCFS) | HCCONTROL_USB_OPER); spin_unlock_irq(&isp116x->lock); hcd->state = HC_STATE_RUNNING; return 0; } #else #define isp116x_bus_suspend NULL #define isp116x_bus_resume NULL #endif static struct hc_driver isp116x_hc_driver = { .description = hcd_name, .product_desc = "ISP116x Host Controller", .hcd_priv_size = sizeof(struct isp116x), .irq = isp116x_irq, .flags = HCD_USB11, .reset = isp116x_reset, .start = isp116x_start, .stop = isp116x_stop, .urb_enqueue = isp116x_urb_enqueue, .urb_dequeue = isp116x_urb_dequeue, .endpoint_disable = isp116x_endpoint_disable, .get_frame_number = isp116x_get_frame, .hub_status_data = isp116x_hub_status_data, .hub_control = isp116x_hub_control, .bus_suspend = isp116x_bus_suspend, .bus_resume = isp116x_bus_resume, }; /*----------------------------------------------------------------*/ static int isp116x_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct isp116x *isp116x; struct resource *res; if (!hcd) return 0; isp116x = hcd_to_isp116x(hcd); remove_debug_file(isp116x); usb_remove_hcd(hcd); iounmap(isp116x->data_reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); release_mem_region(res->start, 2); iounmap(isp116x->addr_reg); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, 2); usb_put_hcd(hcd); return 0; } static int isp116x_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct isp116x *isp116x; struct resource *addr, *data, *ires; void __iomem *addr_reg; void __iomem *data_reg; int irq; int ret = 0; unsigned long irqflags; if (usb_disabled()) return -ENODEV; if (pdev->num_resources < 3) { ret = -ENODEV; goto err1; } data = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = platform_get_resource(pdev, IORESOURCE_MEM, 1); ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!addr || !data || !ires) { ret = -ENODEV; goto err1; } irq = ires->start; irqflags = ires->flags & IRQF_TRIGGER_MASK; if (pdev->dev.dma_mask) { DBG("DMA not supported\n"); ret = -EINVAL; goto err1; } if (!request_mem_region(addr->start, 2, hcd_name)) { ret = -EBUSY; goto err1; } addr_reg = ioremap(addr->start, resource_size(addr)); if (addr_reg == NULL) { ret = -ENOMEM; goto err2; } if (!request_mem_region(data->start, 2, hcd_name)) { ret = -EBUSY; goto err3; } data_reg = ioremap(data->start, resource_size(data)); if (data_reg == NULL) { ret = -ENOMEM; goto err4; } /* allocate and initialize hcd */ hcd = usb_create_hcd(&isp116x_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { ret = -ENOMEM; goto err5; } /* this rsrc_start is bogus */ hcd->rsrc_start = addr->start; isp116x = hcd_to_isp116x(hcd); isp116x->data_reg = data_reg; isp116x->addr_reg = addr_reg; spin_lock_init(&isp116x->lock); INIT_LIST_HEAD(&isp116x->async); isp116x->board = pdev->dev.platform_data; if (!isp116x->board) { ERR("Platform data structure not initialized\n"); ret = -ENODEV; goto err6; } if (isp116x_check_platform_delay(isp116x)) { ERR("USE_PLATFORM_DELAY defined, but delay function not " "implemented.\n"); ERR("See comments in 
drivers/usb/host/isp116x-hcd.c\n"); ret = -ENODEV; goto err6; } ret = usb_add_hcd(hcd, irq, irqflags); if (ret) goto err6; ret = create_debug_file(isp116x); if (ret) { ERR("Couldn't create debugfs entry\n"); goto err7; } return 0; err7: usb_remove_hcd(hcd); err6: usb_put_hcd(hcd); err5: iounmap(data_reg); err4: release_mem_region(data->start, 2); err3: iounmap(addr_reg); err2: release_mem_region(addr->start, 2); err1: ERR("init error, %d\n", ret); return ret; } #ifdef CONFIG_PM /* Suspend of platform device */ static int isp116x_suspend(struct platform_device *dev, pm_message_t state) { VDBG("%s: state %x\n", __func__, state.event); return 0; } /* Resume platform device */ static int isp116x_resume(struct platform_device *dev) { VDBG("%s\n", __func__); return 0; } #else #define isp116x_suspend NULL #define isp116x_resume NULL #endif /* work with hotplug and coldplug */ MODULE_ALIAS("platform:isp116x-hcd"); static struct platform_driver isp116x_driver = { .probe = isp116x_probe, .remove = isp116x_remove, .suspend = isp116x_suspend, .resume = isp116x_resume, .driver = { .name = (char *)hcd_name, .owner = THIS_MODULE, }, }; module_platform_driver(isp116x_driver);
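/*
 * A hypothetical board-support sketch (guarded out with #if 0; the
 * addresses, the IRQ number and the example_* names are placeholders)
 * showing what isp116x_probe() above expects from the platform: memory
 * resource 0 for the data register, memory resource 1 for the address
 * register, one IRQ resource, and an isp116x_platform_data -- probe
 * fails with -ENODEV when the platform data is missing.
 */
#if 0
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/usb/isp116x.h>

static struct resource example_isp116x_resources[] = {
	[0] = DEFINE_RES_MEM(0x08000000, 2),	/* data register */
	[1] = DEFINE_RES_MEM(0x08000002, 2),	/* address register */
	[2] = DEFINE_RES_IRQ(42),
};

static struct isp116x_platform_data example_isp116x_pdata = {
	.sel15Kres		= 1,	/* use internal 15K pulldowns */
	.oc_enable		= 1,	/* analog overcurrent sensing */
	.int_act_high		= 0,	/* INT is active low ... */
	.int_edge_triggered	= 0,	/* ... and level triggered */
	.remote_wakeup_enable	= 0,
};

static struct platform_device example_isp116x_device = {
	.name		= "isp116x-hcd",	/* must match hcd_name */
	.id		= -1,
	.resource	= example_isp116x_resources,
	.num_resources	= ARRAY_SIZE(example_isp116x_resources),
	.dev = {
		.platform_data = &example_isp116x_pdata,
	},
};
#endif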
gpl-2.0
Mirenk/android_kernel_htc_msm8974
fs/proc/consoles.c
2566
2220
/* * Copyright (c) 2010 Werner Fink, Jiri Slaby * * Licensed under GPLv2 */ #include <linux/console.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/tty_driver.h> /* * This is handler for /proc/consoles */ static int show_console_dev(struct seq_file *m, void *v) { static const struct { short flag; char name; } con_flags[] = { { CON_ENABLED, 'E' }, { CON_CONSDEV, 'C' }, { CON_BOOT, 'B' }, { CON_PRINTBUFFER, 'p' }, { CON_BRL, 'b' }, { CON_ANYTIME, 'a' }, }; char flags[ARRAY_SIZE(con_flags) + 1]; struct console *con = v; unsigned int a; dev_t dev = 0; if (con->device) { const struct tty_driver *driver; int index; driver = con->device(con, &index); if (driver) { dev = MKDEV(driver->major, driver->minor_start); dev += index; } } for (a = 0; a < ARRAY_SIZE(con_flags); a++) flags[a] = (con->flags & con_flags[a].flag) ? con_flags[a].name : ' '; flags[a] = 0; seq_setwidth(m, 21 - 1); seq_printf(m, "%s%d", con->name, con->index); seq_pad(m, ' '); seq_printf(m, "%c%c%c (%s)", con->read ? 'R' : '-', con->write ? 'W' : '-', con->unblank ? 'U' : '-', flags); if (dev) seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev)); seq_printf(m, "\n"); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { struct console *con; loff_t off = 0; console_lock(); for_each_console(con) if (off++ == *pos) break; return con; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { struct console *con = v; ++*pos; return con->next; } static void c_stop(struct seq_file *m, void *v) { console_unlock(); } static const struct seq_operations consoles_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_console_dev }; static int consoles_open(struct inode *inode, struct file *file) { return seq_open(file, &consoles_op); } static const struct file_operations proc_consoles_operations = { .open = consoles_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_consoles_init(void) { proc_create("consoles", 0, NULL, &proc_consoles_operations); return 0; } module_init(proc_consoles_init);
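/*
 * For illustration, an approximate /proc/consoles line as produced by
 * show_console_dev() above (the device name, flags and numbers are
 * made up):
 *
 *   tty0                 -WU (EC p  )    4:7
 *
 * i.e. the console name and index padded to 21 columns, the R/W/U
 * capability letters, the six flag letters in con_flags[] order
 * (E C B p b a, blank where a flag is unset), and major:minor when the
 * console maps to a tty device.
 */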
gpl-2.0
htdevices/linux-2.6-imx
drivers/scsi/sun3_scsi.c
2822
14267
/* * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) * * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) * * Adapted from mac_scsinew.c: */ /* * Generic Macintosh NCR5380 driver * * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov> * * derived in part from: */ /* * Generic Generic NCR5380 driver * * Copyright 1995, Russell King * * ALPHA RELEASE 1. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * This is from mac_scsi.h, but hey, maybe this is useful for Sun3 too! :) * * Options : * * PARITY - enable parity checking. Not supported. * * SCSI2 - enable support for SCSI-II tagged queueing. Untested. * * USLEEP - enable support for devices that don't disconnect. Untested. */ /* * $Log: sun3_NCR5380.c,v $ */ #define AUTOSENSE #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sun3ints.h> #include <asm/dvma.h> #include <asm/idprom.h> #include <asm/machines.h> /* dma on! */ #define REAL_DMA #include "scsi.h" #include "initio.h" #include <scsi/scsi_host.h> #include "sun3_scsi.h" static void NCR5380_print(struct Scsi_Host *instance); /* #define OLDDMA */ #define USE_WRAPPER /*#define RESET_BOOT */ #define DRIVER_SETUP #define NDEBUG 0 /* * BUG can be used to trigger a strange code-size related hang on 2.1 kernels */ #ifdef BUG #undef RESET_BOOT #undef DRIVER_SETUP #endif /* #define SUPPORT_TAGS */ #define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); static irqreturn_t scsi_sun3_intr(int irq, void *dummy); static inline unsigned char sun3scsi_read(int reg); static inline void sun3scsi_write(int reg, int value); static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); #endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); static struct scsi_cmnd *sun3_dma_setup_done = NULL; #define AFTER_RESET_DELAY (HZ/2) /* ms to wait after hitting dma regs */ #define SUN3_DMA_DELAY 10 /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ #define SUN3_DVMA_BUFSIZE 0xe000 /* minimum number of bytes to do dma on */ #define SUN3_DMA_MINSIZE 128 static volatile unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; #ifdef OLDDMA static unsigned char *dmabuf = NULL; /* dma memory buffer */ #endif static struct sun3_udc_regs *udc_regs = NULL; static unsigned char *sun3_dma_orig_addr = NULL; static unsigned long sun3_dma_orig_count = 0; static int sun3_dma_active = 0; static unsigned long last_residual = 0; /* * NCR 5380 register access functions */ static inline unsigned char sun3scsi_read(int reg) { return( sun3_scsi_regp[reg] ); } static inline void sun3scsi_write(int reg, int value) { sun3_scsi_regp[reg] = value; } /* dma controller register access functions */ static inline unsigned short sun3_udc_read(unsigned char reg) { unsigned short ret; dregs->udc_addr = UDC_CSR; udelay(SUN3_DMA_DELAY); ret = dregs->udc_data; udelay(SUN3_DMA_DELAY); return ret; } static 
inline void sun3_udc_write(unsigned short val, unsigned char reg) { dregs->udc_addr = reg; udelay(SUN3_DMA_DELAY); dregs->udc_data = val; udelay(SUN3_DMA_DELAY); } /* * XXX: status debug */ static struct Scsi_Host *default_instance; /* * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) * * Purpose : initializes mac NCR5380 driver based on the * command line / compile time port and irq definitions. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int sun3scsi_detect(struct scsi_host_template * tpnt) { unsigned long ioaddr; static int called = 0; struct Scsi_Host *instance; /* check that this machine has an onboard 5380 */ switch(idprom->id_machtype) { case SM_SUN3|SM_3_50: case SM_SUN3|SM_3_60: break; default: return 0; } if(called) return 0; tpnt->proc_name = "Sun3 5380 SCSI"; /* setup variables */ tpnt->can_queue = (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; tpnt->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; tpnt->sg_tablesize = (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; if (setup_hostid >= 0) tpnt->this_id = setup_hostid; else { /* use 7 as default */ tpnt->this_id = 7; } ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); sun3_scsi_regp = (unsigned char *)ioaddr; dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); if((udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs))) == NULL) { printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); return 0; } #ifdef OLDDMA if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) { printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); return 0; } #endif #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = USE_TAGGED_QUEUING; #endif instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) return 0; default_instance = instance; instance->io_port = (unsigned long) ioaddr; instance->irq = IRQ_SUN3_SCSI; NCR5380_init(instance, 0); instance->n_io_port = 32; ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, 0, "Sun3SCSI-5380", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; #else printk("scsi%d: IRQ%d not free, bailing out\n", instance->host_no, instance->irq); return 0; #endif } printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk ("s disabled"); else printk (" %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", instance->can_queue, instance->cmd_per_lun, SUN3SCSI_PUBLIC_RELEASE); printk("\nscsi%d:", instance->host_no); NCR5380_print_options(instance); printk("\n"); dregs->csr = 0; udelay(SUN3_DMA_DELAY); dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; udelay(SUN3_DMA_DELAY); dregs->fifo_count = 0; called = 1; #ifdef RESET_BOOT sun3_scsi_reset_boot(instance); #endif return 1; } int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); return 0; } #ifdef RESET_BOOT /* * Our 'bus reset on boot' function */ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) { unsigned long end; NCR5380_local_declare(); NCR5380_setup(instance); /* * Do a SCSI reset to clean up the bus during initialization. No * messing with the queues, interrupts, or locks necessary here. 
*/ printk( "Sun3 SCSI: resetting the SCSI bus..." ); /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ // sun3_disable_irq( IRQ_SUN3_SCSI ); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); /* The min. reset hold time is 25us, so 40us should be enough */ udelay( 50 ); /* reset RST and interrupt */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_read( RESET_PARITY_INTERRUPT_REG ); for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) barrier(); /* switch on SCSI IRQ again */ // sun3_enable_irq( IRQ_SUN3_SCSI ); printk( " done\n" ); } #endif const char * sun3scsi_info (struct Scsi_Host *spnt) { return ""; } // safe bits for the CSR #define CSR_GOOD 0x060f static irqreturn_t scsi_sun3_intr(int irq, void *dummy) { unsigned short csr = dregs->csr; int handled = 0; if(csr & ~CSR_GOOD) { if(csr & CSR_DMA_BUSERR) { printk("scsi%d: bus error in dma\n", default_instance->host_no); } if(csr & CSR_DMA_CONFLICT) { printk("scsi%d: dma conflict\n", default_instance->host_no); } handled = 1; } if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { NCR5380_intr(irq, dummy); handled = 1; } return IRQ_RETVAL(handled); } /* * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk; * reentering NCR5380_print_status seems to have ugly side effects */ /* this doesn't seem to get used at all -- sam */ #if 0 void sun3_sun3_debug (void) { unsigned long flags; NCR5380_local_declare(); if (default_instance) { local_irq_save(flags); NCR5380_print_status(default_instance); local_irq_restore(flags); } } #endif /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) { #ifdef OLDDMA if(write_flag) memcpy(dmabuf, data, count); else { sun3_dma_orig_addr = data; sun3_dma_orig_count = count; } #else void *addr; if(sun3_dma_orig_addr != NULL) dvma_unmap(sun3_dma_orig_addr); // addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); addr = (void *)dvma_map((unsigned long) data, count); sun3_dma_orig_addr = addr; sun3_dma_orig_count = count; #endif dregs->fifo_count = 0; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; /* set direction */ if(write_flag) dregs->csr |= CSR_SEND; else dregs->csr &= ~CSR_SEND; /* byte count for fifo */ dregs->fifo_count = count; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; if(dregs->fifo_count != count) { printk("scsi%d: fifo_mismatch %04x not %04x\n", default_instance->host_no, dregs->fifo_count, (unsigned int) count); NCR5380_print(default_instance); } /* setup udc */ #ifdef OLDDMA udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8); udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff); #else udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); #endif udc_regs->count = count/2; /* count in words */ udc_regs->mode_hi = UDC_MODE_HIWORD; if(write_flag) { if(count & 1) udc_regs->count++; udc_regs->mode_lo = UDC_MODE_LSEND; udc_regs->rsel = UDC_RSEL_SEND; } else { udc_regs->mode_lo = UDC_MODE_LRECV; udc_regs->rsel = UDC_RSEL_RECV; } /* announce location of regs block */ sun3_udc_write(((dvma_vtob(udc_regs) & 0xff0000) >> 8), UDC_CHN_HI); sun3_udc_write((dvma_vtob(udc_regs) & 0xffff), UDC_CHN_LO); /* set dma master on */ 
sun3_udc_write(0xd, UDC_MODE); /* interrupt enable */ sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); return count; } static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) { unsigned short resid; dregs->udc_addr = 0x32; udelay(SUN3_DMA_DELAY); resid = dregs->udc_data; udelay(SUN3_DMA_DELAY); resid *= 2; return (unsigned long) resid; } static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) { return last_residual; } static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, struct scsi_cmnd *cmd, int write_flag) { if (cmd->request->cmd_type == REQ_TYPE_FS) return wanted; else return 0; } static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) { sun3_udc_write(UDC_CHN_START, UDC_CSR); return 0; } /* clean up after our dma is done */ static int sun3scsi_dma_finish(int write_flag) { unsigned short count; unsigned short fifo; int ret = 0; sun3_dma_active = 0; #if 1 // check to empty the fifo on a read if(!write_flag) { int tmo = 20000; /* .2 sec */ while(1) { if(dregs->csr & CSR_FIFO_EMPTY) break; if(--tmo <= 0) { printk("sun3scsi: fifo failed to empty!\n"); return 1; } udelay(10); } } #endif count = sun3scsi_dma_count(default_instance); #ifdef OLDDMA /* if we've finished a read, copy out the data we read */ if(sun3_dma_orig_addr) { /* check for residual bytes after dma end */ if(count && (NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK))) { printk("scsi%d: sun3_scsi_finish: read overrun baby... ", default_instance->host_no); printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG)); ret = count; } /* copy in what we dma'd no matter what */ memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count); sun3_dma_orig_addr = NULL; } #else fifo = dregs->fifo_count; last_residual = fifo; /* empty bytes from the fifo which didn't make it */ if((!write_flag) && (count - fifo) == 2) { unsigned short data; unsigned char *vaddr; data = dregs->fifo_data; vaddr = (unsigned char *)dvma_btov(sun3_dma_orig_addr); vaddr += (sun3_dma_orig_count - fifo); vaddr[-2] = (data & 0xff00) >> 8; vaddr[-1] = (data & 0xff); } dvma_unmap(sun3_dma_orig_addr); sun3_dma_orig_addr = NULL; #endif sun3_udc_write(UDC_RESET, UDC_CSR); dregs->fifo_count = 0; dregs->csr &= ~CSR_SEND; /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; sun3_dma_setup_done = NULL; return ret; } #include "sun3_NCR5380.c" static struct scsi_host_template driver_template = { .name = SUN3_SCSI_NAME, .detect = sun3scsi_detect, .release = sun3scsi_release, .info = sun3scsi_info, .queuecommand = sun3scsi_queue_command, .eh_abort_handler = sun3scsi_abort, .eh_bus_reset_handler = sun3scsi_bus_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_TABLESIZE, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; #include "scsi_module.c" MODULE_LICENSE("GPL");
gpl-2.0
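For reference, the address/count packing that sun3scsi_dma_setup() performs on the UDC registers can be exercised in isolation. The sketch below is a minimal user-space model, not driver code: the field names follow the driver's struct sun3_udc_regs, the 24-bit DVMA address is split across two 16-bit registers, and the odd-byte round-up applies only to writes, as in the routine above. Everything else here is illustrative.

#include <stdint.h>
#include <stdio.h>

/*
 * Register image mirroring the fields of the driver's
 * struct sun3_udc_regs that sun3scsi_dma_setup() fills in.
 */
struct udc_prog {
	uint16_t addr_hi;	/* DVMA address bits 23:16, placed in bits 15:8 */
	uint16_t addr_lo;	/* DVMA address bits 15:0 */
	uint16_t count;		/* transfer length in 16-bit words */
};

static void udc_fill(struct udc_prog *p, uint32_t dvma_addr,
		     uint32_t bytes, int write_flag)
{
	p->addr_hi = (dvma_addr & 0xff0000) >> 8;
	p->addr_lo = dvma_addr & 0xffff;
	p->count = bytes / 2;		/* the UDC counts words, not bytes */
	if (write_flag && (bytes & 1))
		p->count++;		/* odd-length sends round up one word */
}

int main(void)
{
	struct udc_prog p;

	udc_fill(&p, 0x0f1234, 513, 1);
	printf("addr_hi=%04x addr_lo=%04x count=%u\n",
	       (unsigned)p.addr_hi, (unsigned)p.addr_lo, (unsigned)p.count);
	return 0;
}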
venkatkamesh/android_kernel_sonyz_msm8974
net/8021q/vlan_core.c
3334
8500
#include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/netpoll.h> #include <linux/export.h> #include "vlan.h" bool vlan_do_receive(struct sk_buff **skbp, bool last_handler) { struct sk_buff *skb = *skbp; u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK; struct net_device *vlan_dev; struct vlan_pcpu_stats *rx_stats; vlan_dev = vlan_find_dev(skb->dev, vlan_id); if (!vlan_dev) { /* Only the last call to vlan_do_receive() should change * pkt_type to PACKET_OTHERHOST */ if (vlan_id && last_handler) skb->pkt_type = PACKET_OTHERHOST; return false; } skb = *skbp = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return false; skb->dev = vlan_dev; if (skb->pkt_type == PACKET_OTHERHOST) { /* Our lower layer thinks this is not local, let's make sure. * This allows the VLAN to have a different MAC than the * underlying device, and still route correctly. */ if (!compare_ether_addr(eth_hdr(skb)->h_dest, vlan_dev->dev_addr)) skb->pkt_type = PACKET_HOST; } if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { unsigned int offset = skb->data - skb_mac_header(skb); /* * vlan_insert_tag expect skb->data pointing to mac header. * So change skb->data before calling it and change back to * original position later */ skb_push(skb, offset); skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci); if (!skb) return false; skb_pull(skb, offset + VLAN_HLEN); skb_reset_mac_len(skb); } skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); skb->vlan_tci = 0; rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats); u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; rx_stats->rx_bytes += skb->len; if (skb->pkt_type == PACKET_MULTICAST) rx_stats->rx_multicast++; u64_stats_update_end(&rx_stats->syncp); return true; } /* Must be invoked with rcu_read_lock or with RTNL. */ struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) { struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info); if (vlan_info) { return vlan_group_get_device(&vlan_info->grp, vlan_id); } else { /* * Bonding slaves do not have grp assigned to themselves. * Grp is assigned to bonding master instead. 
*/ if (netif_is_bond_slave(real_dev)) return __vlan_find_dev_deep(real_dev->master, vlan_id); } return NULL; } EXPORT_SYMBOL(__vlan_find_dev_deep); struct net_device *vlan_dev_real_dev(const struct net_device *dev) { return vlan_dev_priv(dev)->real_dev; } EXPORT_SYMBOL(vlan_dev_real_dev); u16 vlan_dev_vlan_id(const struct net_device *dev) { return vlan_dev_priv(dev)->vlan_id; } EXPORT_SYMBOL(vlan_dev_vlan_id); static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) { if (skb_cow(skb, skb_headroom(skb)) < 0) return NULL; memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); skb->mac_header += VLAN_HLEN; skb_reset_mac_len(skb); return skb; } struct sk_buff *vlan_untag(struct sk_buff *skb) { struct vlan_hdr *vhdr; u16 vlan_tci; if (unlikely(vlan_tx_tag_present(skb))) { /* vlan_tci is already set-up so leave this for another time */ return skb; } skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) goto err_free; vhdr = (struct vlan_hdr *) skb->data; vlan_tci = ntohs(vhdr->h_vlan_TCI); __vlan_hwaccel_put_tag(skb, vlan_tci); skb_pull_rcsum(skb, VLAN_HLEN); vlan_set_encap_proto(skb, vhdr); skb = vlan_reorder_header(skb); if (unlikely(!skb)) goto err_free; skb_reset_network_header(skb); skb_reset_transport_header(skb); return skb; err_free: kfree_skb(skb); return NULL; } /* * vlan info and vid list */ static void vlan_group_free(struct vlan_group *grp) { int i; for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++) kfree(grp->vlan_devices_arrays[i]); } static void vlan_info_free(struct vlan_info *vlan_info) { vlan_group_free(&vlan_info->grp); kfree(vlan_info); } static void vlan_info_rcu_free(struct rcu_head *rcu) { vlan_info_free(container_of(rcu, struct vlan_info, rcu)); } static struct vlan_info *vlan_info_alloc(struct net_device *dev) { struct vlan_info *vlan_info; vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL); if (!vlan_info) return NULL; vlan_info->real_dev = dev; INIT_LIST_HEAD(&vlan_info->vid_list); return vlan_info; } struct vlan_vid_info { struct list_head list; unsigned short vid; int refcount; }; static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info, unsigned short vid) { struct vlan_vid_info *vid_info; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { if (vid_info->vid == vid) return vid_info; } return NULL; } static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid) { struct vlan_vid_info *vid_info; vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL); if (!vid_info) return NULL; vid_info->vid = vid; return vid_info; } static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid, struct vlan_vid_info **pvid_info) { struct net_device *dev = vlan_info->real_dev; const struct net_device_ops *ops = dev->netdev_ops; struct vlan_vid_info *vid_info; int err; vid_info = vlan_vid_info_alloc(vid); if (!vid_info) return -ENOMEM; if ((dev->features & NETIF_F_HW_VLAN_FILTER) && ops->ndo_vlan_rx_add_vid) { err = ops->ndo_vlan_rx_add_vid(dev, vid); if (err) { kfree(vid_info); return err; } } list_add(&vid_info->list, &vlan_info->vid_list); vlan_info->nr_vids++; *pvid_info = vid_info; return 0; } int vlan_vid_add(struct net_device *dev, unsigned short vid) { struct vlan_info *vlan_info; struct vlan_vid_info *vid_info; bool vlan_info_created = false; int err; ASSERT_RTNL(); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) { vlan_info = vlan_info_alloc(dev); if (!vlan_info) return -ENOMEM; vlan_info_created = true; } vid_info 
= vlan_vid_info_get(vlan_info, vid); if (!vid_info) { err = __vlan_vid_add(vlan_info, vid, &vid_info); if (err) goto out_free_vlan_info; } vid_info->refcount++; if (vlan_info_created) rcu_assign_pointer(dev->vlan_info, vlan_info); return 0; out_free_vlan_info: if (vlan_info_created) kfree(vlan_info); return err; } EXPORT_SYMBOL(vlan_vid_add); static void __vlan_vid_del(struct vlan_info *vlan_info, struct vlan_vid_info *vid_info) { struct net_device *dev = vlan_info->real_dev; const struct net_device_ops *ops = dev->netdev_ops; unsigned short vid = vid_info->vid; int err; if ((dev->features & NETIF_F_HW_VLAN_FILTER) && ops->ndo_vlan_rx_kill_vid) { err = ops->ndo_vlan_rx_kill_vid(dev, vid); if (err) { pr_warn("failed to kill vid %d for device %s\n", vid, dev->name); } } list_del(&vid_info->list); kfree(vid_info); vlan_info->nr_vids--; } void vlan_vid_del(struct net_device *dev, unsigned short vid) { struct vlan_info *vlan_info; struct vlan_vid_info *vid_info; ASSERT_RTNL(); vlan_info = rtnl_dereference(dev->vlan_info); if (!vlan_info) return; vid_info = vlan_vid_info_get(vlan_info, vid); if (!vid_info) return; vid_info->refcount--; if (vid_info->refcount == 0) { __vlan_vid_del(vlan_info, vid_info); if (vlan_info->nr_vids == 0) { RCU_INIT_POINTER(dev->vlan_info, NULL); call_rcu(&vlan_info->rcu, vlan_info_rcu_free); } } } EXPORT_SYMBOL(vlan_vid_del); int vlan_vids_add_by_dev(struct net_device *dev, const struct net_device *by_dev) { struct vlan_vid_info *vid_info; struct vlan_info *vlan_info; int err; ASSERT_RTNL(); vlan_info = rtnl_dereference(by_dev->vlan_info); if (!vlan_info) return 0; list_for_each_entry(vid_info, &vlan_info->vid_list, list) { err = vlan_vid_add(dev, vid_info->vid); if (err) goto unwind; } return 0; unwind: list_for_each_entry_continue_reverse(vid_info, &vlan_info->vid_list, list) { vlan_vid_del(dev, vid_info->vid); } return err; } EXPORT_SYMBOL(vlan_vids_add_by_dev); void vlan_vids_del_by_dev(struct net_device *dev, const struct net_device *by_dev) { struct vlan_vid_info *vid_info; struct vlan_info *vlan_info; ASSERT_RTNL(); vlan_info = rtnl_dereference(by_dev->vlan_info); if (!vlan_info) return; list_for_each_entry(vid_info, &vlan_info->vid_list, list) vlan_vid_del(dev, vid_info->vid); } EXPORT_SYMBOL(vlan_vids_del_by_dev);
gpl-2.0
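The refcounting that vlan_vid_add()/vlan_vid_del() perform on the per-device VID list is easy to check outside the kernel: one entry per VID, created on first add, freed when the last reference is dropped. Below is a minimal single-threaded sketch under stated assumptions; the RTNL locking, RCU publication, and NETIF_F_HW_VLAN_FILTER callbacks are omitted, and all names are illustrative rather than kernel API.

#include <stdio.h>
#include <stdlib.h>

struct vid_entry {
	struct vid_entry *next;
	unsigned short vid;
	int refcount;
};

static struct vid_entry *vid_list;

static struct vid_entry *vid_find(unsigned short vid)
{
	struct vid_entry *e;

	for (e = vid_list; e; e = e->next)
		if (e->vid == vid)
			return e;
	return NULL;
}

static int vid_add(unsigned short vid)
{
	struct vid_entry *e = vid_find(vid);

	if (!e) {
		e = calloc(1, sizeof(*e));
		if (!e)
			return -1;
		e->vid = vid;
		e->next = vid_list;
		vid_list = e;	/* first user: would program the HW filter here */
	}
	e->refcount++;
	return 0;
}

static void vid_del(unsigned short vid)
{
	struct vid_entry *e = vid_find(vid), **pp;

	if (!e || --e->refcount)
		return;
	for (pp = &vid_list; *pp != e; pp = &(*pp)->next)
		;
	*pp = e->next;		/* last user: would clear the HW filter here */
	free(e);
}

int main(void)
{
	vid_add(100);
	vid_add(100);
	vid_del(100);
	vid_del(100);
	printf("list empty: %s\n", vid_list ? "no" : "yes");
	return 0;
}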
InstigatorX/InstigatorX-V4-Kernel
drivers/ide/ide-legacy.c
4614
1256
#include <linux/kernel.h>
#include <linux/ide.h>

static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
				u8 port_no, const struct ide_port_info *d,
				unsigned long config)
{
	unsigned long base, ctl;
	int irq;

	if (port_no == 0) {
		base = 0x1f0;
		ctl = 0x3f6;
		irq = 14;
	} else {
		base = 0x170;
		ctl = 0x376;
		irq = 15;
	}

	if (!request_region(base, 8, d->name)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
				d->name, base, base + 7);
		return;
	}

	if (!request_region(ctl, 1, d->name)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
				d->name, ctl);
		release_region(base, 8);
		return;
	}

	ide_std_init_ports(hw, base, ctl);
	hw->irq = irq;
	hw->config = config;

	hws[port_no] = hw;
}

int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
{
	struct ide_hw hw[2], *hws[] = { NULL, NULL };

	memset(&hw, 0, sizeof(hw));

	if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
		ide_legacy_init_one(hws, &hw[0], 0, d, config);
	ide_legacy_init_one(hws, &hw[1], 1, d, config);

	if (hws[0] == NULL && hws[1] == NULL &&
	    (d->host_flags & IDE_HFLAG_SINGLE))
		return -ENOENT;

	return ide_host_add(d, hws, 2, NULL);
}
EXPORT_SYMBOL_GPL(ide_legacy_device_add);
gpl-2.0
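The primary/secondary legacy resources hard-wired into ide_legacy_init_one() can be read off as a table: the primary channel uses 0x1f0/0x3f6 with IRQ 14, the secondary 0x170/0x376 with IRQ 15. A stand-alone sketch follows; the port values are copied from the function above, while the struct and program are illustrative only.

#include <stdio.h>

struct ide_legacy_res {
	unsigned long base;	/* 8 task-file ports: base..base+7 */
	unsigned long ctl;	/* single control port */
	int irq;
};

static const struct ide_legacy_res legacy_res[2] = {
	{ 0x1f0, 0x3f6, 14 },	/* port 0 (primary channel) */
	{ 0x170, 0x376, 15 },	/* port 1 (secondary channel) */
};

int main(void)
{
	int i;

	for (i = 0; i < 2; i++)
		printf("port %d: base 0x%lx ctl 0x%lx irq %d\n",
		       i, legacy_res[i].base, legacy_res[i].ctl,
		       legacy_res[i].irq);
	return 0;
}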
LorDClockaN/LorDmodSaga
drivers/ide/ide-pnp.c
4614
2678
/*
 * This file provides autodetection for ISA PnP IDE interfaces.
 * It was tested with "ESS ES1868 Plug and Play AudioDrive" IDE interface.
 *
 * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/ide.h>

#define DRV_NAME "ide-pnp"

/* Add your devices here :)) */
static struct pnp_device_id idepnp_devices[] = {
	/* Generic ESDI/IDE/ATA compatible hard disk controller */
	{.id = "PNP0600", .driver_data = 0},
	{.id = ""}
};

static const struct ide_port_info ide_pnp_port_info = {
	.host_flags = IDE_HFLAG_NO_DMA,
	.chipset = ide_generic,
};

static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
	struct ide_host *host;
	unsigned long base, ctl;
	int rc;
	struct ide_hw hw, *hws[] = { &hw };

	printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");

	if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
	      pnp_irq_valid(dev, 0)))
		return -1;

	base = pnp_port_start(dev, 0);
	ctl = pnp_port_start(dev, 1);

	if (!request_region(base, 8, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
				DRV_NAME, base, base + 7);
		return -EBUSY;
	}

	if (!request_region(ctl, 1, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
				DRV_NAME, ctl);
		release_region(base, 8);
		return -EBUSY;
	}

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);
	hw.irq = pnp_irq(dev, 0);

	rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host);
	if (rc)
		goto out;

	pnp_set_drvdata(dev, host);

	return 0;
out:
	release_region(ctl, 1);
	release_region(base, 8);
	return rc;
}

static void idepnp_remove(struct pnp_dev *dev)
{
	struct ide_host *host = pnp_get_drvdata(dev);

	ide_host_remove(host);

	release_region(pnp_port_start(dev, 1), 1);
	release_region(pnp_port_start(dev, 0), 8);
}

static struct pnp_driver idepnp_driver = {
	.name		= "ide",
	.id_table	= idepnp_devices,
	.probe		= idepnp_probe,
	.remove		= idepnp_remove,
};

static int __init pnpide_init(void)
{
	return pnp_register_driver(&idepnp_driver);
}

static void __exit pnpide_exit(void)
{
	pnp_unregister_driver(&idepnp_driver);
}

module_init(pnpide_init);
module_exit(pnpide_exit);

MODULE_LICENSE("GPL");
gpl-2.0
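idepnp_probe() claims the 8-byte task-file region before the control port and releases them in reverse order on any later failure. The sketch below isolates that unwind pattern with stubbed request/release helpers; only the ordering is taken from the driver, and all names here are illustrative.

#include <stdio.h>

static int request_range(unsigned long start, unsigned long n)
{
	printf("claim   0x%lx-0x%lx\n", start, start + n - 1);
	return 1;			/* pretend the claim always succeeds */
}

static void release_range(unsigned long start, unsigned long n)
{
	printf("release 0x%lx-0x%lx\n", start, start + n - 1);
}

static int probe(unsigned long base, unsigned long ctl, int fail_late)
{
	if (!request_range(base, 8))		/* task-file ports first */
		return -1;
	if (!request_range(ctl, 1)) {		/* then the control port */
		release_range(base, 8);
		return -1;
	}
	if (fail_late) {			/* e.g. host registration failed */
		release_range(ctl, 1);		/* unwind in reverse order */
		release_range(base, 8);
		return -1;
	}
	return 0;
}

int main(void)
{
	probe(0x1f0, 0x3f6, 1);			/* demonstrate the full unwind path */
	return 0;
}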
MuddyPlump/android_kernel_motorola_msm8226
drivers/mfd/stmpe.c
4870
26450
/* * ST Microelectronics MFD: stmpe's driver * * Copyright (C) ST-Ericsson SA 2010 * * License Terms: GNU General Public License, version 2 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson */ #include <linux/gpio.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/mfd/core.h> #include "stmpe.h" static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks) { return stmpe->variant->enable(stmpe, blocks, true); } static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks) { return stmpe->variant->enable(stmpe, blocks, false); } static int __stmpe_reg_read(struct stmpe *stmpe, u8 reg) { int ret; ret = stmpe->ci->read_byte(stmpe, reg); if (ret < 0) dev_err(stmpe->dev, "failed to read reg %#x: %d\n", reg, ret); dev_vdbg(stmpe->dev, "rd: reg %#x => data %#x\n", reg, ret); return ret; } static int __stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val) { int ret; dev_vdbg(stmpe->dev, "wr: reg %#x <= %#x\n", reg, val); ret = stmpe->ci->write_byte(stmpe, reg, val); if (ret < 0) dev_err(stmpe->dev, "failed to write reg %#x: %d\n", reg, ret); return ret; } static int __stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val) { int ret; ret = __stmpe_reg_read(stmpe, reg); if (ret < 0) return ret; ret &= ~mask; ret |= val; return __stmpe_reg_write(stmpe, reg, ret); } static int __stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values) { int ret; ret = stmpe->ci->read_block(stmpe, reg, length, values); if (ret < 0) dev_err(stmpe->dev, "failed to read regs %#x: %d\n", reg, ret); dev_vdbg(stmpe->dev, "rd: reg %#x (%d) => ret %#x\n", reg, length, ret); stmpe_dump_bytes("stmpe rd: ", values, length); return ret; } static int __stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length, const u8 *values) { int ret; dev_vdbg(stmpe->dev, "wr: regs %#x (%d)\n", reg, length); stmpe_dump_bytes("stmpe wr: ", values, length); ret = stmpe->ci->write_block(stmpe, reg, length, values); if (ret < 0) dev_err(stmpe->dev, "failed to write regs %#x: %d\n", reg, ret); return ret; } /** * stmpe_enable - enable blocks on an STMPE device * @stmpe: Device to work on * @blocks: Mask of blocks (enum stmpe_block values) to enable */ int stmpe_enable(struct stmpe *stmpe, unsigned int blocks) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_enable(stmpe, blocks); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_enable); /** * stmpe_disable - disable blocks on an STMPE device * @stmpe: Device to work on * @blocks: Mask of blocks (enum stmpe_block values) to enable */ int stmpe_disable(struct stmpe *stmpe, unsigned int blocks) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_disable(stmpe, blocks); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_disable); /** * stmpe_reg_read() - read a single STMPE register * @stmpe: Device to read from * @reg: Register to read */ int stmpe_reg_read(struct stmpe *stmpe, u8 reg) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_reg_read(stmpe, reg); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_reg_read); /** * stmpe_reg_write() - write a single STMPE register * @stmpe: Device to write to * @reg: Register to write * @val: Value to write */ int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_reg_write(stmpe, reg, val); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_reg_write); /** * stmpe_set_bits() - set 
the value of a bitfield in a STMPE register * @stmpe: Device to write to * @reg: Register to write * @mask: Mask of bits to set * @val: Value to set */ int stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_set_bits(stmpe, reg, mask, val); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_set_bits); /** * stmpe_block_read() - read multiple STMPE registers * @stmpe: Device to read from * @reg: First register * @length: Number of registers * @values: Buffer to write to */ int stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_block_read(stmpe, reg, length, values); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_block_read); /** * stmpe_block_write() - write multiple STMPE registers * @stmpe: Device to write to * @reg: First register * @length: Number of registers * @values: Values to write */ int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length, const u8 *values) { int ret; mutex_lock(&stmpe->lock); ret = __stmpe_block_write(stmpe, reg, length, values); mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_block_write); /** * stmpe_set_altfunc()- set the alternate function for STMPE pins * @stmpe: Device to configure * @pins: Bitmask of pins to affect * @block: block to enable alternate functions for * * @pins is assumed to have a bit set for each of the bits whose alternate * function is to be changed, numbered according to the GPIOXY numbers. * * If the GPIO module is not enabled, this function automatically enables it in * order to perform the change. */ int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block) { struct stmpe_variant_info *variant = stmpe->variant; u8 regaddr = stmpe->regs[STMPE_IDX_GPAFR_U_MSB]; int af_bits = variant->af_bits; int numregs = DIV_ROUND_UP(stmpe->num_gpios * af_bits, 8); int mask = (1 << af_bits) - 1; u8 regs[numregs]; int af, afperreg, ret; if (!variant->get_altfunc) return 0; afperreg = 8 / af_bits; mutex_lock(&stmpe->lock); ret = __stmpe_enable(stmpe, STMPE_BLOCK_GPIO); if (ret < 0) goto out; ret = __stmpe_block_read(stmpe, regaddr, numregs, regs); if (ret < 0) goto out; af = variant->get_altfunc(stmpe, block); while (pins) { int pin = __ffs(pins); int regoffset = numregs - (pin / afperreg) - 1; int pos = (pin % afperreg) * (8 / afperreg); regs[regoffset] &= ~(mask << pos); regs[regoffset] |= af << pos; pins &= ~(1 << pin); } ret = __stmpe_block_write(stmpe, regaddr, numregs, regs); out: mutex_unlock(&stmpe->lock); return ret; } EXPORT_SYMBOL_GPL(stmpe_set_altfunc); /* * GPIO (all variants) */ static struct resource stmpe_gpio_resources[] = { /* Start and end filled dynamically */ { .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell stmpe_gpio_cell = { .name = "stmpe-gpio", .resources = stmpe_gpio_resources, .num_resources = ARRAY_SIZE(stmpe_gpio_resources), }; static struct mfd_cell stmpe_gpio_cell_noirq = { .name = "stmpe-gpio", /* gpio cell resources consist of an irq only so no resources here */ }; /* * Keypad (1601, 2401, 2403) */ static struct resource stmpe_keypad_resources[] = { { .name = "KEYPAD", .start = 0, .end = 0, .flags = IORESOURCE_IRQ, }, { .name = "KEYPAD_OVER", .start = 1, .end = 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell stmpe_keypad_cell = { .name = "stmpe-keypad", .resources = stmpe_keypad_resources, .num_resources = ARRAY_SIZE(stmpe_keypad_resources), }; /* * STMPE801 */ static const u8 stmpe801_regs[] = { 
[STMPE_IDX_CHIP_ID] = STMPE801_REG_CHIP_ID, [STMPE_IDX_ICR_LSB] = STMPE801_REG_SYS_CTRL, [STMPE_IDX_GPMR_LSB] = STMPE801_REG_GPIO_MP_STA, [STMPE_IDX_GPSR_LSB] = STMPE801_REG_GPIO_SET_PIN, [STMPE_IDX_GPCR_LSB] = STMPE801_REG_GPIO_SET_PIN, [STMPE_IDX_GPDR_LSB] = STMPE801_REG_GPIO_DIR, [STMPE_IDX_IEGPIOR_LSB] = STMPE801_REG_GPIO_INT_EN, [STMPE_IDX_ISGPIOR_MSB] = STMPE801_REG_GPIO_INT_STA, }; static struct stmpe_variant_block stmpe801_blocks[] = { { .cell = &stmpe_gpio_cell, .irq = 0, .block = STMPE_BLOCK_GPIO, }, }; static struct stmpe_variant_block stmpe801_blocks_noirq[] = { { .cell = &stmpe_gpio_cell_noirq, .block = STMPE_BLOCK_GPIO, }, }; static int stmpe801_enable(struct stmpe *stmpe, unsigned int blocks, bool enable) { if (blocks & STMPE_BLOCK_GPIO) return 0; else return -EINVAL; } static struct stmpe_variant_info stmpe801 = { .name = "stmpe801", .id_val = STMPE801_ID, .id_mask = 0xffff, .num_gpios = 8, .regs = stmpe801_regs, .blocks = stmpe801_blocks, .num_blocks = ARRAY_SIZE(stmpe801_blocks), .num_irqs = STMPE801_NR_INTERNAL_IRQS, .enable = stmpe801_enable, }; static struct stmpe_variant_info stmpe801_noirq = { .name = "stmpe801", .id_val = STMPE801_ID, .id_mask = 0xffff, .num_gpios = 8, .regs = stmpe801_regs, .blocks = stmpe801_blocks_noirq, .num_blocks = ARRAY_SIZE(stmpe801_blocks_noirq), .enable = stmpe801_enable, }; /* * Touchscreen (STMPE811 or STMPE610) */ static struct resource stmpe_ts_resources[] = { { .name = "TOUCH_DET", .start = 0, .end = 0, .flags = IORESOURCE_IRQ, }, { .name = "FIFO_TH", .start = 1, .end = 1, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell stmpe_ts_cell = { .name = "stmpe-ts", .resources = stmpe_ts_resources, .num_resources = ARRAY_SIZE(stmpe_ts_resources), }; /* * STMPE811 or STMPE610 */ static const u8 stmpe811_regs[] = { [STMPE_IDX_CHIP_ID] = STMPE811_REG_CHIP_ID, [STMPE_IDX_ICR_LSB] = STMPE811_REG_INT_CTRL, [STMPE_IDX_IER_LSB] = STMPE811_REG_INT_EN, [STMPE_IDX_ISR_MSB] = STMPE811_REG_INT_STA, [STMPE_IDX_GPMR_LSB] = STMPE811_REG_GPIO_MP_STA, [STMPE_IDX_GPSR_LSB] = STMPE811_REG_GPIO_SET_PIN, [STMPE_IDX_GPCR_LSB] = STMPE811_REG_GPIO_CLR_PIN, [STMPE_IDX_GPDR_LSB] = STMPE811_REG_GPIO_DIR, [STMPE_IDX_GPRER_LSB] = STMPE811_REG_GPIO_RE, [STMPE_IDX_GPFER_LSB] = STMPE811_REG_GPIO_FE, [STMPE_IDX_GPAFR_U_MSB] = STMPE811_REG_GPIO_AF, [STMPE_IDX_IEGPIOR_LSB] = STMPE811_REG_GPIO_INT_EN, [STMPE_IDX_ISGPIOR_MSB] = STMPE811_REG_GPIO_INT_STA, [STMPE_IDX_GPEDR_MSB] = STMPE811_REG_GPIO_ED, }; static struct stmpe_variant_block stmpe811_blocks[] = { { .cell = &stmpe_gpio_cell, .irq = STMPE811_IRQ_GPIOC, .block = STMPE_BLOCK_GPIO, }, { .cell = &stmpe_ts_cell, .irq = STMPE811_IRQ_TOUCH_DET, .block = STMPE_BLOCK_TOUCHSCREEN, }, }; static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks, bool enable) { unsigned int mask = 0; if (blocks & STMPE_BLOCK_GPIO) mask |= STMPE811_SYS_CTRL2_GPIO_OFF; if (blocks & STMPE_BLOCK_ADC) mask |= STMPE811_SYS_CTRL2_ADC_OFF; if (blocks & STMPE_BLOCK_TOUCHSCREEN) mask |= STMPE811_SYS_CTRL2_TSC_OFF; return __stmpe_set_bits(stmpe, STMPE811_REG_SYS_CTRL2, mask, enable ? 
0 : mask); } static int stmpe811_get_altfunc(struct stmpe *stmpe, enum stmpe_block block) { /* 0 for touchscreen, 1 for GPIO */ return block != STMPE_BLOCK_TOUCHSCREEN; } static struct stmpe_variant_info stmpe811 = { .name = "stmpe811", .id_val = 0x0811, .id_mask = 0xffff, .num_gpios = 8, .af_bits = 1, .regs = stmpe811_regs, .blocks = stmpe811_blocks, .num_blocks = ARRAY_SIZE(stmpe811_blocks), .num_irqs = STMPE811_NR_INTERNAL_IRQS, .enable = stmpe811_enable, .get_altfunc = stmpe811_get_altfunc, }; /* Similar to 811, except number of gpios */ static struct stmpe_variant_info stmpe610 = { .name = "stmpe610", .id_val = 0x0811, .id_mask = 0xffff, .num_gpios = 6, .af_bits = 1, .regs = stmpe811_regs, .blocks = stmpe811_blocks, .num_blocks = ARRAY_SIZE(stmpe811_blocks), .num_irqs = STMPE811_NR_INTERNAL_IRQS, .enable = stmpe811_enable, .get_altfunc = stmpe811_get_altfunc, }; /* * STMPE1601 */ static const u8 stmpe1601_regs[] = { [STMPE_IDX_CHIP_ID] = STMPE1601_REG_CHIP_ID, [STMPE_IDX_ICR_LSB] = STMPE1601_REG_ICR_LSB, [STMPE_IDX_IER_LSB] = STMPE1601_REG_IER_LSB, [STMPE_IDX_ISR_MSB] = STMPE1601_REG_ISR_MSB, [STMPE_IDX_GPMR_LSB] = STMPE1601_REG_GPIO_MP_LSB, [STMPE_IDX_GPSR_LSB] = STMPE1601_REG_GPIO_SET_LSB, [STMPE_IDX_GPCR_LSB] = STMPE1601_REG_GPIO_CLR_LSB, [STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB, [STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB, [STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB, [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB, [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB, [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB, [STMPE_IDX_GPEDR_MSB] = STMPE1601_REG_GPIO_ED_MSB, }; static struct stmpe_variant_block stmpe1601_blocks[] = { { .cell = &stmpe_gpio_cell, .irq = STMPE24XX_IRQ_GPIOC, .block = STMPE_BLOCK_GPIO, }, { .cell = &stmpe_keypad_cell, .irq = STMPE24XX_IRQ_KEYPAD, .block = STMPE_BLOCK_KEYPAD, }, }; /* supported autosleep timeout delay (in msecs) */ static const int stmpe_autosleep_delay[] = { 4, 16, 32, 64, 128, 256, 512, 1024, }; static int stmpe_round_timeout(int timeout) { int i; for (i = 0; i < ARRAY_SIZE(stmpe_autosleep_delay); i++) { if (stmpe_autosleep_delay[i] >= timeout) return i; } /* * requests for delays longer than supported should not return the * longest supported delay */ return -EINVAL; } static int stmpe_autosleep(struct stmpe *stmpe, int autosleep_timeout) { int ret; if (!stmpe->variant->enable_autosleep) return -ENOSYS; mutex_lock(&stmpe->lock); ret = stmpe->variant->enable_autosleep(stmpe, autosleep_timeout); mutex_unlock(&stmpe->lock); return ret; } /* * Both stmpe 1601/2403 support same layout for autosleep */ static int stmpe1601_autosleep(struct stmpe *stmpe, int autosleep_timeout) { int ret, timeout; /* choose the best available timeout */ timeout = stmpe_round_timeout(autosleep_timeout); if (timeout < 0) { dev_err(stmpe->dev, "invalid timeout\n"); return timeout; } ret = __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2, STMPE1601_AUTOSLEEP_TIMEOUT_MASK, timeout); if (ret < 0) return ret; return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2, STPME1601_AUTOSLEEP_ENABLE, STPME1601_AUTOSLEEP_ENABLE); } static int stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks, bool enable) { unsigned int mask = 0; if (blocks & STMPE_BLOCK_GPIO) mask |= STMPE1601_SYS_CTRL_ENABLE_GPIO; if (blocks & STMPE_BLOCK_KEYPAD) mask |= STMPE1601_SYS_CTRL_ENABLE_KPC; return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL, mask, enable ? 
mask : 0); } static int stmpe1601_get_altfunc(struct stmpe *stmpe, enum stmpe_block block) { switch (block) { case STMPE_BLOCK_PWM: return 2; case STMPE_BLOCK_KEYPAD: return 1; case STMPE_BLOCK_GPIO: default: return 0; } } static struct stmpe_variant_info stmpe1601 = { .name = "stmpe1601", .id_val = 0x0210, .id_mask = 0xfff0, /* at least 0x0210 and 0x0212 */ .num_gpios = 16, .af_bits = 2, .regs = stmpe1601_regs, .blocks = stmpe1601_blocks, .num_blocks = ARRAY_SIZE(stmpe1601_blocks), .num_irqs = STMPE1601_NR_INTERNAL_IRQS, .enable = stmpe1601_enable, .get_altfunc = stmpe1601_get_altfunc, .enable_autosleep = stmpe1601_autosleep, }; /* * STMPE24XX */ static const u8 stmpe24xx_regs[] = { [STMPE_IDX_CHIP_ID] = STMPE24XX_REG_CHIP_ID, [STMPE_IDX_ICR_LSB] = STMPE24XX_REG_ICR_LSB, [STMPE_IDX_IER_LSB] = STMPE24XX_REG_IER_LSB, [STMPE_IDX_ISR_MSB] = STMPE24XX_REG_ISR_MSB, [STMPE_IDX_GPMR_LSB] = STMPE24XX_REG_GPMR_LSB, [STMPE_IDX_GPSR_LSB] = STMPE24XX_REG_GPSR_LSB, [STMPE_IDX_GPCR_LSB] = STMPE24XX_REG_GPCR_LSB, [STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB, [STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB, [STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB, [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB, [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB, [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB, [STMPE_IDX_GPEDR_MSB] = STMPE24XX_REG_GPEDR_MSB, }; static struct stmpe_variant_block stmpe24xx_blocks[] = { { .cell = &stmpe_gpio_cell, .irq = STMPE24XX_IRQ_GPIOC, .block = STMPE_BLOCK_GPIO, }, { .cell = &stmpe_keypad_cell, .irq = STMPE24XX_IRQ_KEYPAD, .block = STMPE_BLOCK_KEYPAD, }, }; static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks, bool enable) { unsigned int mask = 0; if (blocks & STMPE_BLOCK_GPIO) mask |= STMPE24XX_SYS_CTRL_ENABLE_GPIO; if (blocks & STMPE_BLOCK_KEYPAD) mask |= STMPE24XX_SYS_CTRL_ENABLE_KPC; return __stmpe_set_bits(stmpe, STMPE24XX_REG_SYS_CTRL, mask, enable ? mask : 0); } static int stmpe24xx_get_altfunc(struct stmpe *stmpe, enum stmpe_block block) { switch (block) { case STMPE_BLOCK_ROTATOR: return 2; case STMPE_BLOCK_KEYPAD: return 1; case STMPE_BLOCK_GPIO: default: return 0; } } static struct stmpe_variant_info stmpe2401 = { .name = "stmpe2401", .id_val = 0x0101, .id_mask = 0xffff, .num_gpios = 24, .af_bits = 2, .regs = stmpe24xx_regs, .blocks = stmpe24xx_blocks, .num_blocks = ARRAY_SIZE(stmpe24xx_blocks), .num_irqs = STMPE24XX_NR_INTERNAL_IRQS, .enable = stmpe24xx_enable, .get_altfunc = stmpe24xx_get_altfunc, }; static struct stmpe_variant_info stmpe2403 = { .name = "stmpe2403", .id_val = 0x0120, .id_mask = 0xffff, .num_gpios = 24, .af_bits = 2, .regs = stmpe24xx_regs, .blocks = stmpe24xx_blocks, .num_blocks = ARRAY_SIZE(stmpe24xx_blocks), .num_irqs = STMPE24XX_NR_INTERNAL_IRQS, .enable = stmpe24xx_enable, .get_altfunc = stmpe24xx_get_altfunc, .enable_autosleep = stmpe1601_autosleep, /* same as stmpe1601 */ }; static struct stmpe_variant_info *stmpe_variant_info[STMPE_NBR_PARTS] = { [STMPE610] = &stmpe610, [STMPE801] = &stmpe801, [STMPE811] = &stmpe811, [STMPE1601] = &stmpe1601, [STMPE2401] = &stmpe2401, [STMPE2403] = &stmpe2403, }; /* * These devices can be connected in a 'no-irq' configuration - the irq pin * is not used and the device cannot interrupt the CPU. Here we only list * devices which support this configuration - the driver will fail probing * for any devices not listed here which are configured in this way. 
*/ static struct stmpe_variant_info *stmpe_noirq_variant_info[STMPE_NBR_PARTS] = { [STMPE801] = &stmpe801_noirq, }; static irqreturn_t stmpe_irq(int irq, void *data) { struct stmpe *stmpe = data; struct stmpe_variant_info *variant = stmpe->variant; int num = DIV_ROUND_UP(variant->num_irqs, 8); u8 israddr = stmpe->regs[STMPE_IDX_ISR_MSB]; u8 isr[num]; int ret; int i; if (variant->id_val == STMPE801_ID) { handle_nested_irq(stmpe->irq_base); return IRQ_HANDLED; } ret = stmpe_block_read(stmpe, israddr, num, isr); if (ret < 0) return IRQ_NONE; for (i = 0; i < num; i++) { int bank = num - i - 1; u8 status = isr[i]; u8 clear; status &= stmpe->ier[bank]; if (!status) continue; clear = status; while (status) { int bit = __ffs(status); int line = bank * 8 + bit; handle_nested_irq(stmpe->irq_base + line); status &= ~(1 << bit); } stmpe_reg_write(stmpe, israddr + i, clear); } return IRQ_HANDLED; } static void stmpe_irq_lock(struct irq_data *data) { struct stmpe *stmpe = irq_data_get_irq_chip_data(data); mutex_lock(&stmpe->irq_lock); } static void stmpe_irq_sync_unlock(struct irq_data *data) { struct stmpe *stmpe = irq_data_get_irq_chip_data(data); struct stmpe_variant_info *variant = stmpe->variant; int num = DIV_ROUND_UP(variant->num_irqs, 8); int i; for (i = 0; i < num; i++) { u8 new = stmpe->ier[i]; u8 old = stmpe->oldier[i]; if (new == old) continue; stmpe->oldier[i] = new; stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB] - i, new); } mutex_unlock(&stmpe->irq_lock); } static void stmpe_irq_mask(struct irq_data *data) { struct stmpe *stmpe = irq_data_get_irq_chip_data(data); int offset = data->irq - stmpe->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); stmpe->ier[regoffset] &= ~mask; } static void stmpe_irq_unmask(struct irq_data *data) { struct stmpe *stmpe = irq_data_get_irq_chip_data(data); int offset = data->irq - stmpe->irq_base; int regoffset = offset / 8; int mask = 1 << (offset % 8); stmpe->ier[regoffset] |= mask; } static struct irq_chip stmpe_irq_chip = { .name = "stmpe", .irq_bus_lock = stmpe_irq_lock, .irq_bus_sync_unlock = stmpe_irq_sync_unlock, .irq_mask = stmpe_irq_mask, .irq_unmask = stmpe_irq_unmask, }; static int __devinit stmpe_irq_init(struct stmpe *stmpe) { struct irq_chip *chip = NULL; int num_irqs = stmpe->variant->num_irqs; int base = stmpe->irq_base; int irq; if (stmpe->variant->id_val != STMPE801_ID) chip = &stmpe_irq_chip; for (irq = base; irq < base + num_irqs; irq++) { irq_set_chip_data(irq, stmpe); irq_set_chip_and_handler(irq, chip, handle_edge_irq); irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif } return 0; } static void stmpe_irq_remove(struct stmpe *stmpe) { int num_irqs = stmpe->variant->num_irqs; int base = stmpe->irq_base; int irq; for (irq = base; irq < base + num_irqs; irq++) { #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } } static int __devinit stmpe_chip_init(struct stmpe *stmpe) { unsigned int irq_trigger = stmpe->pdata->irq_trigger; int autosleep_timeout = stmpe->pdata->autosleep_timeout; struct stmpe_variant_info *variant = stmpe->variant; u8 icr = 0; unsigned int id; u8 data[2]; int ret; ret = stmpe_block_read(stmpe, stmpe->regs[STMPE_IDX_CHIP_ID], ARRAY_SIZE(data), data); if (ret < 0) return ret; id = (data[0] << 8) | data[1]; if ((id & variant->id_mask) != variant->id_val) { dev_err(stmpe->dev, "unknown chip id: %#x\n", id); return -EINVAL; } dev_info(stmpe->dev, "%s detected, chip 
id: %#x\n", variant->name, id); /* Disable all modules -- subdrivers should enable what they need. */ ret = stmpe_disable(stmpe, ~0); if (ret) return ret; if (stmpe->irq >= 0) { if (id == STMPE801_ID) icr = STMPE801_REG_SYS_CTRL_INT_EN; else icr = STMPE_ICR_LSB_GIM; /* STMPE801 doesn't support Edge interrupts */ if (id != STMPE801_ID) { if (irq_trigger == IRQF_TRIGGER_FALLING || irq_trigger == IRQF_TRIGGER_RISING) icr |= STMPE_ICR_LSB_EDGE; } if (irq_trigger == IRQF_TRIGGER_RISING || irq_trigger == IRQF_TRIGGER_HIGH) { if (id == STMPE801_ID) icr |= STMPE801_REG_SYS_CTRL_INT_HI; else icr |= STMPE_ICR_LSB_HIGH; } if (stmpe->pdata->irq_invert_polarity) { if (id == STMPE801_ID) icr ^= STMPE801_REG_SYS_CTRL_INT_HI; else icr ^= STMPE_ICR_LSB_HIGH; } } if (stmpe->pdata->autosleep) { ret = stmpe_autosleep(stmpe, autosleep_timeout); if (ret) return ret; } return stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_ICR_LSB], icr); } static int __devinit stmpe_add_device(struct stmpe *stmpe, struct mfd_cell *cell, int irq) { return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1, NULL, stmpe->irq_base + irq); } static int __devinit stmpe_devices_init(struct stmpe *stmpe) { struct stmpe_variant_info *variant = stmpe->variant; unsigned int platform_blocks = stmpe->pdata->blocks; int ret = -EINVAL; int i; for (i = 0; i < variant->num_blocks; i++) { struct stmpe_variant_block *block = &variant->blocks[i]; if (!(platform_blocks & block->block)) continue; platform_blocks &= ~block->block; ret = stmpe_add_device(stmpe, block->cell, block->irq); if (ret) return ret; } if (platform_blocks) dev_warn(stmpe->dev, "platform wants blocks (%#x) not present on variant", platform_blocks); return ret; } /* Called from client specific probe routines */ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum) { struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev); struct stmpe *stmpe; int ret; if (!pdata) return -EINVAL; stmpe = kzalloc(sizeof(struct stmpe), GFP_KERNEL); if (!stmpe) return -ENOMEM; mutex_init(&stmpe->irq_lock); mutex_init(&stmpe->lock); stmpe->dev = ci->dev; stmpe->client = ci->client; stmpe->pdata = pdata; stmpe->irq_base = pdata->irq_base; stmpe->ci = ci; stmpe->partnum = partnum; stmpe->variant = stmpe_variant_info[partnum]; stmpe->regs = stmpe->variant->regs; stmpe->num_gpios = stmpe->variant->num_gpios; dev_set_drvdata(stmpe->dev, stmpe); if (ci->init) ci->init(stmpe); if (pdata->irq_over_gpio) { ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe"); if (ret) { dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n", ret); goto out_free; } stmpe->irq = gpio_to_irq(pdata->irq_gpio); } else { stmpe->irq = ci->irq; } if (stmpe->irq < 0) { /* use alternate variant info for no-irq mode, if supported */ dev_info(stmpe->dev, "%s configured in no-irq mode by platform data\n", stmpe->variant->name); if (!stmpe_noirq_variant_info[stmpe->partnum]) { dev_err(stmpe->dev, "%s does not support no-irq mode!\n", stmpe->variant->name); ret = -ENODEV; goto free_gpio; } stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum]; } ret = stmpe_chip_init(stmpe); if (ret) goto free_gpio; if (stmpe->irq >= 0) { ret = stmpe_irq_init(stmpe); if (ret) goto free_gpio; ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq, pdata->irq_trigger | IRQF_ONESHOT, "stmpe", stmpe); if (ret) { dev_err(stmpe->dev, "failed to request IRQ: %d\n", ret); goto out_removeirq; } } ret = stmpe_devices_init(stmpe); if (ret) { dev_err(stmpe->dev, "failed to add children\n"); goto out_removedevs; } return 0; 
out_removedevs: mfd_remove_devices(stmpe->dev); if (stmpe->irq >= 0) free_irq(stmpe->irq, stmpe); out_removeirq: if (stmpe->irq >= 0) stmpe_irq_remove(stmpe); free_gpio: if (pdata->irq_over_gpio) gpio_free(pdata->irq_gpio); out_free: kfree(stmpe); return ret; } int stmpe_remove(struct stmpe *stmpe) { mfd_remove_devices(stmpe->dev); if (stmpe->irq >= 0) { free_irq(stmpe->irq, stmpe); stmpe_irq_remove(stmpe); } if (stmpe->pdata->irq_over_gpio) gpio_free(stmpe->pdata->irq_gpio); kfree(stmpe); return 0; } #ifdef CONFIG_PM static int stmpe_suspend(struct device *dev) { struct stmpe *stmpe = dev_get_drvdata(dev); if (stmpe->irq >= 0 && device_may_wakeup(dev)) enable_irq_wake(stmpe->irq); return 0; } static int stmpe_resume(struct device *dev) { struct stmpe *stmpe = dev_get_drvdata(dev); if (stmpe->irq >= 0 && device_may_wakeup(dev)) disable_irq_wake(stmpe->irq); return 0; } const struct dev_pm_ops stmpe_dev_pm_ops = { .suspend = stmpe_suspend, .resume = stmpe_resume, }; #endif
gpl-2.0
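stmpe_round_timeout() maps a requested autosleep delay to the smallest supported value that is at least as long, and returns an error rather than silently picking the longest entry when the request exceeds the hardware maximum. A user-space sketch of the same lookup; the delay table is copied from the driver, everything else is illustrative.

#include <stdio.h>

static const int autosleep_delay[] = { 4, 16, 32, 64, 128, 256, 512, 1024 };
#define N_DELAYS (int)(sizeof(autosleep_delay) / sizeof(autosleep_delay[0]))

static int round_timeout(int timeout_ms)
{
	int i;

	for (i = 0; i < N_DELAYS; i++)
		if (autosleep_delay[i] >= timeout_ms)
			return i;	/* idx doubles as the register field value */
	return -1;			/* longer than the hardware supports */
}

int main(void)
{
	/* 100 ms rounds up to the 128 ms entry; 2000 ms is unsupported */
	printf("100ms -> idx %d, 2000ms -> idx %d\n",
	       round_timeout(100), round_timeout(2000));
	return 0;
}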
neonicus/Paralax
drivers/net/wireless/iwlegacy/4965.c
5126
52673
/****************************************************************************** * * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <net/mac80211.h> #include <linux/etherdevice.h> #include <asm/unaligned.h> #include "common.h" #include "4965.h" /** * il_verify_inst_sparse - verify runtime uCode image in card vs. host, * using sample data 100 bytes apart. If these sample points are good, * it's a pretty good bet that everything between them is good, too. */ static int il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len) { u32 val; int ret = 0; u32 errcnt = 0; u32 i; D_INFO("ucode inst image size is %u\n", len); for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) { /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IL_DL_IO is set */ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND); val = _il_rd(il, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { ret = -EIO; errcnt++; if (errcnt >= 3) break; } } return ret; } /** * il4965_verify_inst_full - verify runtime uCode image in card vs. host, * looking at all data. 
*/ static int il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len) { u32 val; u32 save_len = len; int ret = 0; u32 errcnt; D_INFO("ucode inst image size is %u\n", len); il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND); errcnt = 0; for (; len > 0; len -= sizeof(u32), image++) { /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IL_DL_IO is set */ val = _il_rd(il, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { IL_ERR("uCode INST section is invalid at " "offset 0x%x, is 0x%x, s/b 0x%x\n", save_len - len, val, le32_to_cpu(*image)); ret = -EIO; errcnt++; if (errcnt >= 20) break; } } if (!errcnt) D_INFO("ucode image in INSTRUCTION memory is good\n"); return ret; } /** * il4965_verify_ucode - determine which instruction image is in SRAM, * and verify its contents */ int il4965_verify_ucode(struct il_priv *il) { __le32 *image; u32 len; int ret; /* Try bootstrap */ image = (__le32 *) il->ucode_boot.v_addr; len = il->ucode_boot.len; ret = il4965_verify_inst_sparse(il, image, len); if (!ret) { D_INFO("Bootstrap uCode is good in inst SRAM\n"); return 0; } /* Try initialize */ image = (__le32 *) il->ucode_init.v_addr; len = il->ucode_init.len; ret = il4965_verify_inst_sparse(il, image, len); if (!ret) { D_INFO("Initialize uCode is good in inst SRAM\n"); return 0; } /* Try runtime/protocol */ image = (__le32 *) il->ucode_code.v_addr; len = il->ucode_code.len; ret = il4965_verify_inst_sparse(il, image, len); if (!ret) { D_INFO("Runtime uCode is good in inst SRAM\n"); return 0; } IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); /* Since nothing seems to match, show first several data entries in * instruction SRAM, so maybe visual inspection will give a clue. * Selection of bootstrap image (vs. other images) is arbitrary. */ image = (__le32 *) il->ucode_boot.v_addr; len = il->ucode_boot.len; ret = il4965_verify_inst_full(il, image, len); return ret; } /****************************************************************************** * * EEPROM related functions * ******************************************************************************/ /* * The device's EEPROM semaphore prevents conflicts between driver and uCode * when accessing the EEPROM; each access is a series of pulses to/from the * EEPROM chip, not a single event, so even reads could conflict if they * weren't arbitrated by the semaphore. 
*/ int il4965_eeprom_acquire_semaphore(struct il_priv *il) { u16 count; int ret; for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { /* Request semaphore */ il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); /* See if we got it */ ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, EEPROM_SEM_TIMEOUT); if (ret >= 0) return ret; } return ret; } void il4965_eeprom_release_semaphore(struct il_priv *il) { il_clear_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); } int il4965_eeprom_check_version(struct il_priv *il) { u16 eeprom_ver; u16 calib_ver; eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION); calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET); if (eeprom_ver < il->cfg->eeprom_ver || calib_ver < il->cfg->eeprom_calib_ver) goto err; IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver); return 0; err: IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x " "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver, calib_ver, il->cfg->eeprom_calib_ver); return -EINVAL; } void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac) { const u8 *addr = il_eeprom_query_addr(il, EEPROM_MAC_ADDRESS); memcpy(mac, addr, ETH_ALEN); } /* Send led command */ static int il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd) { struct il_host_cmd cmd = { .id = C_LEDS, .len = sizeof(struct il_led_cmd), .data = led_cmd, .flags = CMD_ASYNC, .callback = NULL, }; u32 reg; reg = _il_rd(il, CSR_LED_REG); if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); return il_send_cmd(il, &cmd); } /* Set led register off */ void il4965_led_enable(struct il_priv *il) { _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON); } static int il4965_send_tx_power(struct il_priv *il); static int il4965_hw_get_temperature(struct il_priv *il); /* Highest firmware API version supported */ #define IL4965_UCODE_API_MAX 2 /* Lowest firmware API version supported */ #define IL4965_UCODE_API_MIN 2 #define IL4965_FW_PRE "iwlwifi-4965-" #define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode" #define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api) /* check contents of special bootstrap uCode SRAM */ static int il4965_verify_bsm(struct il_priv *il) { __le32 *image = il->ucode_boot.v_addr; u32 len = il->ucode_boot.len; u32 reg; u32 val; D_INFO("Begin verify bsm\n"); /* verify BSM SRAM contents */ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG); for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; reg += sizeof(u32), image++) { val = il_rd_prph(il, reg); if (val != le32_to_cpu(*image)) { IL_ERR("BSM uCode verification failed at " "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND, len, val, le32_to_cpu(*image)); return -EIO; } } D_INFO("BSM bootstrap uCode image OK\n"); return 0; } /** * il4965_load_bsm - Load bootstrap instructions * * BSM operation: * * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program * in special SRAM that does not power down during RFKILL. When powering back * up after power-saving sleeps (or during initial uCode load), the BSM loads * the bootstrap program into the on-board processor, and starts it. * * The bootstrap program loads (via DMA) instructions and data for a new * program from host DRAM locations indicated by the host driver in the * BSM_DRAM_* registers. Once the new program is loaded, it starts * automatically. 
* * When initializing the NIC, the host driver points the BSM to the * "initialize" uCode image. This uCode sets up some internal data, then * notifies host via "initialize alive" that it is complete. * * The host then replaces the BSM_DRAM_* pointer values to point to the * normal runtime uCode instructions and a backup uCode data cache buffer * (filled initially with starting data values for the on-board processor), * then triggers the "initialize" uCode to load and launch the runtime uCode, * which begins normal operation. * * When doing a power-save shutdown, runtime uCode saves data SRAM into * the backup data cache in DRAM before SRAM is powered down. * * When powering back up, the BSM loads the bootstrap program. This reloads * the runtime uCode instructions and the backup data cache into SRAM, * and re-launches the runtime uCode from where it left off. */ static int il4965_load_bsm(struct il_priv *il) { __le32 *image = il->ucode_boot.v_addr; u32 len = il->ucode_boot.len; dma_addr_t pinst; dma_addr_t pdata; u32 inst_len; u32 data_len; int i; u32 done; u32 reg_offset; int ret; D_INFO("Begin load bsm\n"); il->ucode_type = UCODE_RT; /* make sure bootstrap program is no larger than BSM's SRAM size */ if (len > IL49_MAX_BSM_SIZE) return -EINVAL; /* Tell bootstrap uCode where to find the "Initialize" uCode * in host DRAM ... host DRAM physical address bits 35:4 for 4965. * NOTE: il_init_alive_start() will replace these values, * after the "initialize" uCode has run, to point to * runtime/protocol instructions and backup data cache. */ pinst = il->ucode_init.p_addr >> 4; pdata = il->ucode_init_data.p_addr >> 4; inst_len = il->ucode_init.len; data_len = il->ucode_init_data.len; il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); /* Fill BSM memory with bootstrap instructions */ for (reg_offset = BSM_SRAM_LOWER_BOUND; reg_offset < BSM_SRAM_LOWER_BOUND + len; reg_offset += sizeof(u32), image++) _il_wr_prph(il, reg_offset, le32_to_cpu(*image)); ret = il4965_verify_bsm(il); if (ret) return ret; /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0); il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND); il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); /* Load bootstrap code into instruction SRAM now, * to prepare to load "initialize" uCode */ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); /* Wait for load of bootstrap uCode to finish */ for (i = 0; i < 100; i++) { done = il_rd_prph(il, BSM_WR_CTRL_REG); if (!(done & BSM_WR_CTRL_REG_BIT_START)) break; udelay(10); } if (i < 100) D_INFO("BSM write complete, poll %d iterations\n", i); else { IL_ERR("BSM write did not complete!\n"); return -EIO; } /* Enable future boot loads whenever power management unit triggers it * (e.g. when powering back up after power-save shutdown) */ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); return 0; } /** * il4965_set_ucode_ptrs - Set uCode address location * * Tell initialization uCode where to find runtime uCode. * * BSM registers initially contain pointers to initialization uCode. * We need to replace them to load runtime uCode inst and data, * and to save runtime data when powering down. 
*/ static int il4965_set_ucode_ptrs(struct il_priv *il) { dma_addr_t pinst; dma_addr_t pdata; int ret = 0; /* bits 35:4 for 4965 */ pinst = il->ucode_code.p_addr >> 4; pdata = il->ucode_data_backup.p_addr >> 4; /* Tell bootstrap uCode where to find image to load */ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst); il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata); il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len); /* Inst byte count must be last to set up, bit 31 signals uCode * that all new ptr/size info is in place */ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, il->ucode_code.len | BSM_DRAM_INST_LOAD); D_INFO("Runtime uCode pointers are set.\n"); return ret; } /** * il4965_init_alive_start - Called after N_ALIVE notification received * * Called after N_ALIVE notification received from "initialize" uCode. * * The 4965 "initialize" ALIVE reply contains calibration data for: * Voltage, temperature, and MIMO tx gain correction, now stored in il * (3945 does not contain this data). * * Tell "initialize" uCode to go ahead and load the runtime uCode. */ static void il4965_init_alive_start(struct il_priv *il) { /* Bootstrap uCode has loaded initialize uCode ... verify inst image. * This is a paranoid check, because we would not have gotten the * "initialize" alive if code weren't properly loaded. */ if (il4965_verify_ucode(il)) { /* Runtime instruction load was bad; * take it all the way back down so we can try again */ D_INFO("Bad \"initialize\" uCode load.\n"); goto restart; } /* Calculate temperature */ il->temperature = il4965_hw_get_temperature(il); /* Send pointers to protocol/runtime uCode image ... init code will * load and launch runtime uCode, which will send us another "Alive" * notification. */ D_INFO("Initialization Alive received.\n"); if (il4965_set_ucode_ptrs(il)) { /* Runtime instruction load won't happen; * take it all the way back down so we can try again */ D_INFO("Couldn't set up uCode pointers.\n"); goto restart; } return; restart: queue_work(il->workqueue, &il->restart); } static bool iw4965_is_ht40_channel(__le32 rxon_flags) { int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >> RXON_FLG_CHANNEL_MODE_POS; return (chan_mod == CHANNEL_MODE_PURE_40 || chan_mod == CHANNEL_MODE_MIXED); } void il4965_nic_config(struct il_priv *il) { unsigned long flags; u16 radio_cfg; spin_lock_irqsave(&il->lock, flags); radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG); /* write radio config values to register */ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) il_set_bit(il, CSR_HW_IF_CONFIG_REG, EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | EEPROM_RF_CFG_STEP_MSK(radio_cfg) | EEPROM_RF_CFG_DASH_MSK(radio_cfg)); /* set CSR_HW_CONFIG_REG for uCode use */ il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); il->calib_info = (struct il_eeprom_calib_info *) il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET); spin_unlock_irqrestore(&il->lock, flags); } /* Reset differential Rx gains in NIC to prepare for chain noise calibration. * Called after every association, but this runs only once! * ... once chain noise is calibrated the first time, it's good forever. 
*/ static void il4965_chain_noise_reset(struct il_priv *il) { struct il_chain_noise_data *data = &(il->chain_noise_data); if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) { struct il_calib_diff_gain_cmd cmd; /* clear data for chain noise calibration algorithm */ data->chain_noise_a = 0; data->chain_noise_b = 0; data->chain_noise_c = 0; data->chain_signal_a = 0; data->chain_signal_b = 0; data->chain_signal_c = 0; data->beacon_count = 0; memset(&cmd, 0, sizeof(cmd)); cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD; cmd.diff_gain_a = 0; cmd.diff_gain_b = 0; cmd.diff_gain_c = 0; if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd)) IL_ERR("Could not send C_PHY_CALIBRATION\n"); data->state = IL_CHAIN_NOISE_ACCUMULATE; D_CALIB("Run chain_noise_calibrate\n"); } } static s32 il4965_math_div_round(s32 num, s32 denom, s32 * res) { s32 sign = 1; if (num < 0) { sign = -sign; num = -num; } if (denom < 0) { sign = -sign; denom = -denom; } *res = 1; *res = ((num * 2 + denom) / (denom * 2)) * sign; return 1; } /** * il4965_get_voltage_compensation - Power supply voltage comp for txpower * * Determines power supply voltage compensation for txpower calculations. * Returns number of 1/2-dB steps to subtract from gain table idx, * to compensate for difference between power supply voltage during * factory measurements, vs. current power supply voltage. * * Voltage indication is higher for lower voltage. * Lower voltage requires more gain (lower gain table idx). */ static s32 il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage) { s32 comp = 0; if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage || TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage) return 0; il4965_math_div_round(current_voltage - eeprom_voltage, TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp); if (current_voltage > eeprom_voltage) comp *= 2; if ((comp < -2) || (comp > 2)) comp = 0; return comp; } static s32 il4965_get_tx_atten_grp(u16 channel) { if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH && channel <= CALIB_IL_TX_ATTEN_GR5_LCH) return CALIB_CH_GROUP_5; if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH && channel <= CALIB_IL_TX_ATTEN_GR1_LCH) return CALIB_CH_GROUP_1; if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH && channel <= CALIB_IL_TX_ATTEN_GR2_LCH) return CALIB_CH_GROUP_2; if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH && channel <= CALIB_IL_TX_ATTEN_GR3_LCH) return CALIB_CH_GROUP_3; if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH && channel <= CALIB_IL_TX_ATTEN_GR4_LCH) return CALIB_CH_GROUP_4; return -EINVAL; } static u32 il4965_get_sub_band(const struct il_priv *il, u32 channel) { s32 b = -1; for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { if (il->calib_info->band_info[b].ch_from == 0) continue; if (channel >= il->calib_info->band_info[b].ch_from && channel <= il->calib_info->band_info[b].ch_to) break; } return b; } static s32 il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) { s32 val; if (x2 == x1) return y1; else { il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); return val + y2; } } /** * il4965_interpolate_chan - Interpolate factory measurements for one channel * * Interpolates factory measurements from the two sample channels within a * sub-band, to apply to channel of interest. Interpolation is proportional to * differences in channel frequencies, which is proportional to differences * in channel number. 
*/ static int il4965_interpolate_chan(struct il_priv *il, u32 channel, struct il_eeprom_calib_ch_info *chan_info) { s32 s = -1; u32 c; u32 m; const struct il_eeprom_calib_measure *m1; const struct il_eeprom_calib_measure *m2; struct il_eeprom_calib_measure *omeas; u32 ch_i1; u32 ch_i2; s = il4965_get_sub_band(il, channel); if (s >= EEPROM_TX_POWER_BANDS) { IL_ERR("Tx Power can not find channel %d\n", channel); return -1; } ch_i1 = il->calib_info->band_info[s].ch1.ch_num; ch_i2 = il->calib_info->band_info[s].ch2.ch_num; chan_info->ch_num = (u8) channel; D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s, ch_i1, ch_i2); for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { m1 = &(il->calib_info->band_info[s].ch1. measurements[c][m]); m2 = &(il->calib_info->band_info[s].ch2. measurements[c][m]); omeas = &(chan_info->measurements[c][m]); omeas->actual_pow = (u8) il4965_interpolate_value(channel, ch_i1, m1->actual_pow, ch_i2, m2->actual_pow); omeas->gain_idx = (u8) il4965_interpolate_value(channel, ch_i1, m1->gain_idx, ch_i2, m2->gain_idx); omeas->temperature = (u8) il4965_interpolate_value(channel, ch_i1, m1->temperature, ch_i2, m2->temperature); omeas->pa_det = (s8) il4965_interpolate_value(channel, ch_i1, m1->pa_det, ch_i2, m2->pa_det); D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, m1->actual_pow, m2->actual_pow, omeas->actual_pow); D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, m1->gain_idx, m2->gain_idx, omeas->gain_idx); D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, m1->pa_det, m2->pa_det, omeas->pa_det); D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c, m, m1->temperature, m2->temperature, omeas->temperature); } } return 0; } /* bit-rate-dependent table to prevent Tx distortion, in half-dB units, * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ static s32 back_off_table[] = { 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ 10 /* CCK */ }; /* Thermal compensation values for txpower for various frequency ranges ... 
* ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ static struct il4965_txpower_comp_entry { s32 degrees_per_05db_a; s32 degrees_per_05db_a_denom; } tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { { 9, 2}, /* group 0 5.2, ch 34-43 */ { 4, 1}, /* group 1 5.2, ch 44-70 */ { 4, 1}, /* group 2 5.2, ch 71-124 */ { 4, 1}, /* group 3 5.2, ch 125-200 */ { 3, 1} /* group 4 2.4, ch all */ }; static s32 get_min_power_idx(s32 rate_power_idx, u32 band) { if (!band) { if ((rate_power_idx & 7) <= 4) return MIN_TX_GAIN_IDX_52GHZ_EXT; } return MIN_TX_GAIN_IDX; } struct gain_entry { u8 dsp; u8 radio; }; static const struct gain_entry gain_table[2][108] = { /* 5.2GHz power gain idx table */ { {123, 0x3F}, /* highest txpower */ {117, 0x3F}, {110, 0x3F}, {104, 0x3F}, {98, 0x3F}, {110, 0x3E}, {104, 0x3E}, {98, 0x3E}, {110, 0x3D}, {104, 0x3D}, {98, 0x3D}, {110, 0x3C}, {104, 0x3C}, {98, 0x3C}, {110, 0x3B}, {104, 0x3B}, {98, 0x3B}, {110, 0x3A}, {104, 0x3A}, {98, 0x3A}, {110, 0x39}, {104, 0x39}, {98, 0x39}, {110, 0x38}, {104, 0x38}, {98, 0x38}, {110, 0x37}, {104, 0x37}, {98, 0x37}, {110, 0x36}, {104, 0x36}, {98, 0x36}, {110, 0x35}, {104, 0x35}, {98, 0x35}, {110, 0x34}, {104, 0x34}, {98, 0x34}, {110, 0x33}, {104, 0x33}, {98, 0x33}, {110, 0x32}, {104, 0x32}, {98, 0x32}, {110, 0x31}, {104, 0x31}, {98, 0x31}, {110, 0x30}, {104, 0x30}, {98, 0x30}, {110, 0x25}, {104, 0x25}, {98, 0x25}, {110, 0x24}, {104, 0x24}, {98, 0x24}, {110, 0x23}, {104, 0x23}, {98, 0x23}, {110, 0x22}, {104, 0x18}, {98, 0x18}, {110, 0x17}, {104, 0x17}, {98, 0x17}, {110, 0x16}, {104, 0x16}, {98, 0x16}, {110, 0x15}, {104, 0x15}, {98, 0x15}, {110, 0x14}, {104, 0x14}, {98, 0x14}, {110, 0x13}, {104, 0x13}, {98, 0x13}, {110, 0x12}, {104, 0x08}, {98, 0x08}, {110, 0x07}, {104, 0x07}, {98, 0x07}, {110, 0x06}, {104, 0x06}, {98, 0x06}, {110, 0x05}, {104, 0x05}, {98, 0x05}, {110, 0x04}, {104, 0x04}, {98, 0x04}, {110, 0x03}, {104, 0x03}, {98, 0x03}, {110, 0x02}, {104, 0x02}, {98, 0x02}, {110, 0x01}, {104, 0x01}, {98, 0x01}, {110, 0x00}, {104, 0x00}, {98, 0x00}, {93, 0x00}, {88, 0x00}, {83, 0x00}, {78, 0x00}, }, /* 2.4GHz power gain idx table */ { {110, 0x3f}, /* highest txpower */ {104, 0x3f}, {98, 0x3f}, {110, 0x3e}, {104, 0x3e}, {98, 0x3e}, {110, 0x3d}, {104, 0x3d}, {98, 0x3d}, {110, 0x3c}, {104, 0x3c}, {98, 0x3c}, {110, 0x3b}, {104, 0x3b}, {98, 0x3b}, {110, 0x3a}, {104, 0x3a}, {98, 0x3a}, {110, 0x39}, {104, 0x39}, {98, 0x39}, {110, 0x38}, {104, 0x38}, {98, 0x38}, {110, 0x37}, {104, 0x37}, {98, 0x37}, {110, 0x36}, {104, 0x36}, {98, 0x36}, {110, 0x35}, {104, 0x35}, {98, 0x35}, {110, 0x34}, {104, 0x34}, {98, 0x34}, {110, 0x33}, {104, 0x33}, {98, 0x33}, {110, 0x32}, {104, 0x32}, {98, 0x32}, {110, 0x31}, {104, 0x31}, {98, 0x31}, {110, 0x30}, {104, 0x30}, {98, 0x30}, {110, 0x6}, {104, 0x6}, {98, 0x6}, {110, 0x5}, {104, 0x5}, {98, 0x5}, {110, 0x4}, {104, 0x4}, {98, 0x4}, {110, 0x3}, {104, 0x3}, {98, 0x3}, {110, 0x2}, {104, 0x2}, {98, 0x2}, {110, 0x1}, {104, 0x1}, {98, 0x1}, {110, 0x0}, {104, 0x0}, {98, 0x0}, {97, 0}, {96, 0}, {95, 0}, {94, 0}, {93, 0}, {92, 0}, {91, 0}, {90, 0}, {89, 0}, {88, 0}, {87, 0}, {86, 0}, {85, 0}, {84, 0}, {83, 0}, {82, 0}, {81, 0}, {80, 0}, {79, 0}, {78, 0}, {77, 0}, {76, 0}, {75, 0}, {74, 0}, {73, 0}, {72, 0}, {71, 0}, {70, 0}, {69, 0}, {68, 0}, {67, 0}, {66, 0}, {65, 0}, {64, 0}, {63, 0}, {62, 0}, {61, 0}, {60, 0}, {59, 0}, } }; static int il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40, u8 ctrl_chan_high, struct il4965_tx_power_db *tx_power_tbl) { u8 saturation_power; s32 target_power; s32 
user_target_power; s32 power_limit; s32 current_temp; s32 reg_limit; s32 current_regulatory; s32 txatten_grp = CALIB_CH_GROUP_MAX; int i; int c; const struct il_channel_info *ch_info = NULL; struct il_eeprom_calib_ch_info ch_eeprom_info; const struct il_eeprom_calib_measure *measurement; s16 voltage; s32 init_voltage; s32 voltage_compensation; s32 degrees_per_05db_num; s32 degrees_per_05db_denom; s32 factory_temp; s32 temperature_comp[2]; s32 factory_gain_idx[2]; s32 factory_actual_pwr[2]; s32 power_idx; /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units * are used for idxing into txpower table) */ user_target_power = 2 * il->tx_power_user_lmt; /* Get current (RXON) channel, band, width */ D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40); ch_info = il_get_channel_info(il, il->band, channel); if (!il_is_channel_valid(ch_info)) return -EINVAL; /* get txatten group, used to select 1) thermal txpower adjustment * and 2) mimo txpower balance between Tx chains. */ txatten_grp = il4965_get_tx_atten_grp(channel); if (txatten_grp < 0) { IL_ERR("Can't find txatten group for channel %d.\n", channel); return txatten_grp; } D_TXPOWER("channel %d belongs to txatten group %d\n", channel, txatten_grp); if (is_ht40) { if (ctrl_chan_high) channel -= 2; else channel += 2; } /* hardware txpower limits ... * saturation (clipping distortion) txpowers are in half-dBm */ if (band) saturation_power = il->calib_info->saturation_power24; else saturation_power = il->calib_info->saturation_power52; if (saturation_power < IL_TX_POWER_SATURATION_MIN || saturation_power > IL_TX_POWER_SATURATION_MAX) { if (band) saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24; else saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52; } /* regulatory txpower limits ... reg_limit values are in half-dBm, * max_power_avg values are in dBm, convert * 2 */ if (is_ht40) reg_limit = ch_info->ht40_max_power_avg * 2; else reg_limit = ch_info->max_power_avg * 2; if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) || (reg_limit > IL_TX_POWER_REGULATORY_MAX)) { if (band) reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24; else reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52; } /* Interpolate txpower calibration values for this channel, * based on factory calibration tests on spaced channels. 
*/ il4965_interpolate_chan(il, channel, &ch_eeprom_info); /* calculate tx gain adjustment based on power supply voltage */ voltage = le16_to_cpu(il->calib_info->voltage); init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage); voltage_compensation = il4965_get_voltage_compensation(voltage, init_voltage); D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage, voltage, voltage_compensation); /* get current temperature (Celsius) */ current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN); current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX); current_temp = KELVIN_TO_CELSIUS(current_temp); /* select thermal txpower adjustment params, based on channel group * (same frequency group used for mimo txatten adjustment) */ degrees_per_05db_num = tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; degrees_per_05db_denom = tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; /* get per-chain txpower values from factory measurements */ for (c = 0; c < 2; c++) { measurement = &ch_eeprom_info.measurements[c][1]; /* txgain adjustment (in half-dB steps) based on difference * between factory and current temperature */ factory_temp = measurement->temperature; il4965_math_div_round((current_temp - factory_temp) * degrees_per_05db_denom, degrees_per_05db_num, &temperature_comp[c]); factory_gain_idx[c] = measurement->gain_idx; factory_actual_pwr[c] = measurement->actual_pow; D_TXPOWER("chain = %d\n", c); D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n", factory_temp, current_temp, temperature_comp[c]); D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c], factory_actual_pwr[c]); } /* for each of 33 bit-rates (including 1 for CCK) */ for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) { u8 is_mimo_rate; union il4965_tx_power_dual_stream tx_power; /* for mimo, reduce each chain's txpower by half * (3dB, 6 steps), so total output power is regulatory * compliant. */ if (i & 0x8) { current_regulatory = reg_limit - IL_TX_POWER_MIMO_REGULATORY_COMPENSATION; is_mimo_rate = 1; } else { current_regulatory = reg_limit; is_mimo_rate = 0; } /* find txpower limit, either hardware or regulatory */ power_limit = saturation_power - back_off_table[i]; if (power_limit > current_regulatory) power_limit = current_regulatory; /* reduce user's txpower request if necessary * for this rate on this channel */ target_power = user_target_power; if (target_power > power_limit) target_power = power_limit; D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i, saturation_power - back_off_table[i], current_regulatory, user_target_power, target_power); /* for each of 2 Tx chains (radio transmitters) */ for (c = 0; c < 2; c++) { s32 atten_value; if (is_mimo_rate) atten_value = (s32) le32_to_cpu(il->card_alive_init. tx_atten[txatten_grp][c]); else atten_value = 0; /* calculate idx; higher idx means lower txpower */ power_idx = (u8) (factory_gain_idx[c] - (target_power - factory_actual_pwr[c]) - temperature_comp[c] - voltage_compensation + atten_value); /* D_TXPOWER("calculated txpower idx %d\n", power_idx); */ if (power_idx < get_min_power_idx(i, band)) power_idx = get_min_power_idx(i, band); /* adjust 5 GHz idx to support negative idxes */ if (!band) power_idx += 9; /* CCK, rate 32, reduce txpower for CCK */ if (i == POWER_TBL_CCK_ENTRY) power_idx += IL_TX_POWER_CCK_COMPENSATION_C_STEP; /* stay within the table!
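* (gain_table[band] above has 108 entries per band, so valid idxes are 0..107.)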
*/ if (power_idx > 107) { IL_WARN("txpower idx %d > 107\n", power_idx); power_idx = 107; } if (power_idx < 0) { IL_WARN("txpower idx %d < 0\n", power_idx); power_idx = 0; } /* fill txpower command for this rate/chain */ tx_power.s.radio_tx_gain[c] = gain_table[band][power_idx].radio; tx_power.s.dsp_predis_atten[c] = gain_table[band][power_idx].dsp; D_TXPOWER("chain %d mimo %d idx %d " "gain 0x%02x dsp %d\n", c, atten_value, power_idx, tx_power.s.radio_tx_gain[c], tx_power.s.dsp_predis_atten[c]); } /* for each chain */ tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); } /* for each rate */ return 0; } /** * il4965_send_tx_power - Configure the TXPOWER level user limit * * Uses the active RXON for channel, band, and characteristics (ht40, high) * The power limit is taken from il->tx_power_user_lmt. */ static int il4965_send_tx_power(struct il_priv *il) { struct il4965_txpowertable_cmd cmd = { 0 }; int ret; u8 band = 0; bool is_ht40 = false; u8 ctrl_chan_high = 0; if (WARN_ONCE (test_bit(S_SCAN_HW, &il->status), "TX Power requested while scanning!\n")) return -EAGAIN; band = il->band == IEEE80211_BAND_2GHZ; is_ht40 = iw4965_is_ht40_channel(il->active.flags); if (is_ht40 && (il->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) ctrl_chan_high = 1; cmd.band = band; cmd.channel = il->active.channel; ret = il4965_fill_txpower_tbl(il, band, le16_to_cpu(il->active.channel), is_ht40, ctrl_chan_high, &cmd.tx_power); if (ret) goto out; ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd); out: return ret; } static int il4965_send_rxon_assoc(struct il_priv *il) { int ret = 0; struct il4965_rxon_assoc_cmd rxon_assoc; const struct il_rxon_cmd *rxon1 = &il->staging; const struct il_rxon_cmd *rxon2 = &il->active; if (rxon1->flags == rxon2->flags && rxon1->filter_flags == rxon2->filter_flags && rxon1->cck_basic_rates == rxon2->cck_basic_rates && rxon1->ofdm_ht_single_stream_basic_rates == rxon2->ofdm_ht_single_stream_basic_rates && rxon1->ofdm_ht_dual_stream_basic_rates == rxon2->ofdm_ht_dual_stream_basic_rates && rxon1->rx_chain == rxon2->rx_chain && rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) { D_INFO("Using current RXON_ASSOC. Not resending.\n"); return 0; } rxon_assoc.flags = il->staging.flags; rxon_assoc.filter_flags = il->staging.filter_flags; rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates; rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates; rxon_assoc.reserved = 0; rxon_assoc.ofdm_ht_single_stream_basic_rates = il->staging.ofdm_ht_single_stream_basic_rates; rxon_assoc.ofdm_ht_dual_stream_basic_rates = il->staging.ofdm_ht_dual_stream_basic_rates; rxon_assoc.rx_chain_select_flags = il->staging.rx_chain; ret = il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc), &rxon_assoc, NULL); return ret; } static int il4965_commit_rxon(struct il_priv *il) { /* cast away the const for active_rxon in this function */ struct il_rxon_cmd *active_rxon = (void *)&il->active; int ret; bool new_assoc = !!(il->staging.filter_flags & RXON_FILTER_ASSOC_MSK); if (!il_is_alive(il)) return -EBUSY; /* always get timestamp with Rx frame */ il->staging.flags |= RXON_FLG_TSF2HOST_MSK; ret = il_check_rxon_cmd(il); if (ret) { IL_ERR("Invalid RXON configuration. 
Not committing.\n"); return -EINVAL; } /* * receive commit_rxon request * abort any previous channel switch if still in process */ if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) && il->switch_channel != il->staging.channel) { D_11H("abort channel switch on %d\n", le16_to_cpu(il->switch_channel)); il_chswitch_done(il, false); } /* If we don't need to send a full RXON, we can use * il_rxon_assoc_cmd which is used to reconfigure filter * and other flags for the current radio configuration. */ if (!il_full_rxon_required(il)) { ret = il_send_rxon_assoc(il); if (ret) { IL_ERR("Error setting RXON_ASSOC (%d)\n", ret); return ret; } memcpy(active_rxon, &il->staging, sizeof(*active_rxon)); il_print_rx_config_cmd(il); /* * We do not commit tx power settings while channel changing, * do it now if tx power changed. */ il_set_tx_power(il, il->tx_power_next, false); return 0; } /* If we are currently associated and the new config requires * an RXON_ASSOC and the new config wants the associated mask enabled, * we must clear the associated from the active configuration * before we apply the new config */ if (il_is_associated(il) && new_assoc) { D_INFO("Toggling associated bit on current RXON\n"); active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; ret = il_send_cmd_pdu(il, C_RXON, sizeof(struct il_rxon_cmd), active_rxon); /* If the mask clearing failed then we set * active_rxon back to what it was previously */ if (ret) { active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret); return ret; } il_clear_ucode_stations(il); il_restore_stations(il); ret = il4965_restore_default_wep_keys(il); if (ret) { IL_ERR("Failed to restore WEP keys (%d)\n", ret); return ret; } } D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n" "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? 
"" : "out"), le16_to_cpu(il->staging.channel), il->staging.bssid_addr); il_set_rxon_hwcrypto(il, !il->cfg->mod_params->sw_crypto); /* Apply the new configuration * RXON unassoc clears the station table in uCode so restoration of * stations is needed after it (the RXON command) completes */ if (!new_assoc) { ret = il_send_cmd_pdu(il, C_RXON, sizeof(struct il_rxon_cmd), &il->staging); if (ret) { IL_ERR("Error setting new RXON (%d)\n", ret); return ret; } D_INFO("Return from !new_assoc RXON.\n"); memcpy(active_rxon, &il->staging, sizeof(*active_rxon)); il_clear_ucode_stations(il); il_restore_stations(il); ret = il4965_restore_default_wep_keys(il); if (ret) { IL_ERR("Failed to restore WEP keys (%d)\n", ret); return ret; } } if (new_assoc) { il->start_calib = 0; /* Apply the new configuration * RXON assoc doesn't clear the station table in uCode, */ ret = il_send_cmd_pdu(il, C_RXON, sizeof(struct il_rxon_cmd), &il->staging); if (ret) { IL_ERR("Error setting new RXON (%d)\n", ret); return ret; } memcpy(active_rxon, &il->staging, sizeof(*active_rxon)); } il_print_rx_config_cmd(il); il4965_init_sensitivity(il); /* If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames */ ret = il_set_tx_power(il, il->tx_power_next, true); if (ret) { IL_ERR("Error sending TX power (%d)\n", ret); return ret; } return 0; } static int il4965_hw_channel_switch(struct il_priv *il, struct ieee80211_channel_switch *ch_switch) { int rc; u8 band = 0; bool is_ht40 = false; u8 ctrl_chan_high = 0; struct il4965_channel_switch_cmd cmd; const struct il_channel_info *ch_info; u32 switch_time_in_usec, ucode_switch_time; u16 ch; u32 tsf_low; u8 switch_count; u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval); struct ieee80211_vif *vif = il->vif; band = (il->band == IEEE80211_BAND_2GHZ); if (WARN_ON_ONCE(vif == NULL)) return -EIO; is_ht40 = iw4965_is_ht40_channel(il->staging.flags); if (is_ht40 && (il->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) ctrl_chan_high = 1; cmd.band = band; cmd.expect_beacon = 0; ch = ch_switch->channel->hw_value; cmd.channel = cpu_to_le16(ch); cmd.rxon_flags = il->staging.flags; cmd.rxon_filter_flags = il->staging.filter_flags; switch_count = ch_switch->count; tsf_low = ch_switch->timestamp & 0x0ffffffff; /* * calculate the ucode channel switch time * adding TSF as one of the factor for when to switch */ if (il->ucode_beacon_time > tsf_low && beacon_interval) { if (switch_count > ((il->ucode_beacon_time - tsf_low) / beacon_interval)) { switch_count -= (il->ucode_beacon_time - tsf_low) / beacon_interval; } else switch_count = 0; } if (switch_count <= 1) cmd.switch_time = cpu_to_le32(il->ucode_beacon_time); else { switch_time_in_usec = vif->bss_conf.beacon_int * switch_count * TIME_UNIT; ucode_switch_time = il_usecs_to_beacons(il, switch_time_in_usec, beacon_interval); cmd.switch_time = il_add_beacon_time(il, il->ucode_beacon_time, ucode_switch_time, beacon_interval); } D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time); ch_info = il_get_channel_info(il, il->band, ch); if (ch_info) cmd.expect_beacon = il_is_channel_radar(ch_info); else { IL_ERR("invalid channel switch from %u to %u\n", il->active.channel, ch); return -EFAULT; } rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high, &cmd.tx_power); if (rc) { D_11H("error:%d fill txpower_tbl\n", rc); return rc; } return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd); } /** * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx 
byte-count array */ static void il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq, u16 byte_cnt) { struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr; int txq_id = txq->q.id; int write_ptr = txq->q.write_ptr; int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE; __le16 bc_ent; WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); bc_ent = cpu_to_le16(len & 0xFFF); /* Set up byte count within first 256 entries */ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; /* If within first 64 entries, duplicate at end */ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; } /** * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin) * @stats: Provides the temperature reading from the uCode * * A return of <0 indicates bogus data in the stats */ static int il4965_hw_get_temperature(struct il_priv *il) { s32 temperature; s32 vt; s32 R1, R2, R3; u32 R4; if (test_bit(S_TEMPERATURE, &il->status) && (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) { D_TEMP("Running HT40 temperature calibration\n"); R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]); R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]); R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]); R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]); } else { D_TEMP("Running temperature calibration\n"); R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]); R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]); R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]); R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]); } /* * Temperature is only 23 bits, so sign extend out to 32. * * NOTE If we haven't received a stats notification yet * with an updated temperature, use R4 provided to us in the * "initialize" ALIVE response. */ if (!test_bit(S_TEMPERATURE, &il->status)) vt = sign_extend32(R4, 23); else vt = sign_extend32(le32_to_cpu (il->_4965.stats.general.common.temperature), 23); D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); if (R3 == R1) { IL_ERR("Calibration conflict R1 == R3\n"); return -1; } /* Calculate temperature in degrees Kelvin, adjust by 97%. * Add offset to center the adjustment around 0 degrees Centigrade. */ temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); temperature /= (R3 - R1); temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET; D_TEMP("Calibrated temperature: %dK, %dC\n", temperature, KELVIN_TO_CELSIUS(temperature)); return temperature; } /* Adjust Txpower only if temperature variance is greater than threshold. */ #define IL_TEMPERATURE_THRESHOLD 3 /** * il4965_is_temp_calib_needed - determines if new calibration is needed * * If the temperature has changed sufficiently, then a recalibration * is needed. * * Assumes caller will replace il->last_temperature once calibration * executed.
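* e.g. with IL_TEMPERATURE_THRESHOLD == 3, a drift of 2 kelvin since the last calibration is ignored, while a drift of 3 or more schedules the txpower work (see il4965_temperature_calib() below).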
*/ static int il4965_is_temp_calib_needed(struct il_priv *il) { int temp_diff; if (!test_bit(S_STATS, &il->status)) { D_TEMP("Temperature not updated -- no stats.\n"); return 0; } temp_diff = il->temperature - il->last_temperature; /* get absolute value */ if (temp_diff < 0) { D_POWER("Getting cooler, delta %d\n", temp_diff); temp_diff = -temp_diff; } else if (temp_diff == 0) D_POWER("Temperature unchanged\n"); else D_POWER("Getting warmer, delta %d\n", temp_diff); if (temp_diff < IL_TEMPERATURE_THRESHOLD) { D_POWER(" => thermal txpower calib not needed\n"); return 0; } D_POWER(" => thermal txpower calib needed\n"); return 1; } void il4965_temperature_calib(struct il_priv *il) { s32 temp; temp = il4965_hw_get_temperature(il); if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp)) return; if (il->temperature != temp) { if (il->temperature) D_TEMP("Temperature changed " "from %dC to %dC\n", KELVIN_TO_CELSIUS(il->temperature), KELVIN_TO_CELSIUS(temp)); else D_TEMP("Temperature " "initialized to %dC\n", KELVIN_TO_CELSIUS(temp)); } il->temperature = temp; set_bit(S_TEMPERATURE, &il->status); if (!il->disable_tx_power_cal && unlikely(!test_bit(S_SCANNING, &il->status)) && il4965_is_temp_calib_needed(il)) queue_work(il->workqueue, &il->txpower_work); } static u16 il4965_get_hcmd_size(u8 cmd_id, u16 len) { switch (cmd_id) { case C_RXON: return (u16) sizeof(struct il4965_rxon_cmd); default: return len; } } static u16 il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data) { struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data; addsta->mode = cmd->mode; memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify)); memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo)); addsta->station_flags = cmd->station_flags; addsta->station_flags_msk = cmd->station_flags_msk; addsta->tid_disable_tx = cmd->tid_disable_tx; addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; addsta->sleep_tx_count = cmd->sleep_tx_count; addsta->reserved1 = cpu_to_le16(0); addsta->reserved2 = cpu_to_le16(0); return (u16) sizeof(struct il4965_addsta_cmd); } static void il4965_post_scan(struct il_priv *il) { /* * Since setting the RXON may have been deferred while * performing the scan, fire one off if needed */ if (memcmp(&il->staging, &il->active, sizeof(il->staging))) il_commit_rxon(il); } static void il4965_post_associate(struct il_priv *il) { struct ieee80211_vif *vif = il->vif; struct ieee80211_conf *conf = NULL; int ret = 0; if (!vif || !il->is_open) return; if (test_bit(S_EXIT_PENDING, &il->status)) return; il_scan_cancel_timeout(il, 200); conf = &il->hw->conf; il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; il_commit_rxon(il); ret = il_send_rxon_timing(il); if (ret) IL_WARN("RXON timing - " "Attempting to continue.\n"); il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; il_set_rxon_ht(il, &il->current_ht_config); if (il->ops->set_rxon_chain) il->ops->set_rxon_chain(il); il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid, vif->bss_conf.beacon_int); if (vif->bss_conf.use_short_preamble) il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; if (il->staging.flags & RXON_FLG_BAND_24G_MSK) { if (vif->bss_conf.use_short_slot) il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; else il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; } il_commit_rxon(il); 
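/* This commit carries RXON_FILTER_ASSOC_MSK; per il4965_commit_rxon() above, an assoc RXON does not clear the uCode station table. */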
D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid, il->active.bssid_addr); switch (vif->type) { case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_ADHOC: il4965_send_beacon_cmd(il); break; default: IL_ERR("%s Should not be called in %d mode\n", __func__, vif->type); break; } /* the chain noise calibration will enabled PM upon completion * If chain noise has already been run, then we need to enable * power management here */ if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE) il_power_update_mode(il, false); /* Enable Rx differential gain and sensitivity calibrations */ il4965_chain_noise_reset(il); il->start_calib = 1; } static void il4965_config_ap(struct il_priv *il) { struct ieee80211_vif *vif = il->vif; int ret = 0; lockdep_assert_held(&il->mutex); if (test_bit(S_EXIT_PENDING, &il->status)) return; /* The following should be done only at AP bring up */ if (!il_is_associated(il)) { /* RXON - unassoc (to set timing command) */ il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; il_commit_rxon(il); /* RXON Timing */ ret = il_send_rxon_timing(il); if (ret) IL_WARN("RXON timing failed - " "Attempting to continue.\n"); /* AP has all antennas */ il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant; il_set_rxon_ht(il, &il->current_ht_config); if (il->ops->set_rxon_chain) il->ops->set_rxon_chain(il); il->staging.assoc_id = 0; if (vif->bss_conf.use_short_preamble) il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; if (il->staging.flags & RXON_FLG_BAND_24G_MSK) { if (vif->bss_conf.use_short_slot) il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; else il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; } /* need to send beacon cmd before committing assoc RXON! */ il4965_send_beacon_cmd(il); /* restore RXON assoc */ il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; il_commit_rxon(il); } il4965_send_beacon_cmd(il); } const struct il_ops il4965_ops = { .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl, .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd, .txq_free_tfd = il4965_hw_txq_free_tfd, .txq_init = il4965_hw_tx_queue_init, .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr, .init_alive_start = il4965_init_alive_start, .load_ucode = il4965_load_bsm, .dump_nic_error_log = il4965_dump_nic_error_log, .dump_fh = il4965_dump_fh, .set_channel_switch = il4965_hw_channel_switch, .apm_init = il_apm_init, .send_tx_power = il4965_send_tx_power, .update_chain_flags = il4965_update_chain_flags, .eeprom_acquire_semaphore = il4965_eeprom_acquire_semaphore, .eeprom_release_semaphore = il4965_eeprom_release_semaphore, .rxon_assoc = il4965_send_rxon_assoc, .commit_rxon = il4965_commit_rxon, .set_rxon_chain = il4965_set_rxon_chain, .get_hcmd_size = il4965_get_hcmd_size, .build_addsta_hcmd = il4965_build_addsta_hcmd, .request_scan = il4965_request_scan, .post_scan = il4965_post_scan, .post_associate = il4965_post_associate, .config_ap = il4965_config_ap, .manage_ibss_station = il4965_manage_ibss_station, .update_bcast_stations = il4965_update_bcast_stations, .send_led_cmd = il4965_send_led_cmd, }; struct il_cfg il4965_cfg = { .name = "Intel(R) Wireless WiFi Link 4965AGN", .fw_name_pre = IL4965_FW_PRE, .ucode_api_max = IL4965_UCODE_API_MAX, .ucode_api_min = IL4965_UCODE_API_MIN, .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N, .valid_tx_ant = ANT_AB, .valid_rx_ant = ANT_ABC, .eeprom_ver = EEPROM_4965_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, .mod_params = &il4965_mod_params, .led_mode = IL_LED_BLINK, /* * Force 
use of chains B and C for scan RX on 5 GHz band * because the device has off-channel reception on chain A. */ .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC, .eeprom_size = IL4965_EEPROM_IMG_SIZE, .num_of_queues = IL49_NUM_QUEUES, .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES, .pll_cfg_val = 0, .set_l0s = true, .use_bsm = true, .led_compensation = 61, .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS, .wd_timeout = IL_DEF_WD_TIMEOUT, .temperature_kelvin = true, .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, .regulatory_bands = { EEPROM_REGULATORY_BAND_1_CHANNELS, EEPROM_REGULATORY_BAND_2_CHANNELS, EEPROM_REGULATORY_BAND_3_CHANNELS, EEPROM_REGULATORY_BAND_4_CHANNELS, EEPROM_REGULATORY_BAND_5_CHANNELS, EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS }, }; /* Module firmware */ MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
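/*
 * Illustrative sketch, not part of the upstream driver: the rounding
 * scheme used by il4965_math_div_round() above, isolated into a
 * hypothetical helper added here purely as an example.
 * ((num * 2 + denom) / (denom * 2)) divides the magnitudes and rounds
 * to the nearest integer before the sign is re-applied, so e.g.
 * 7/2 -> 4 and -7/2 -> -4.
 */
static inline int
il4965_example_div_round(int num, int denom)
{
	int sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	/* nearest-integer division of the magnitudes */
	return sign * ((num * 2 + denom) / (denom * 2));
}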
gpl-2.0
robcore/machinex_kernelv2
arch/powerpc/platforms/cell/beat_interrupt.c
6662
6874
/* * Celleb/Beat Interrupt controller * * (C) Copyright 2006-2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/types.h> #include <asm/machdep.h> #include "beat_interrupt.h" #include "beat_wrapper.h" #define MAX_IRQS NR_IRQS static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; static struct irq_domain *beatic_host; /* * In this implementation, "virq" == "IRQ plug number", * "(irq_hw_number_t)hwirq" == "IRQ outlet number". */ /* assumption: locked */ static inline void beatic_update_irq_mask(unsigned int irq_plug) { int off; unsigned long masks[4]; off = (irq_plug / 256) * 4; masks[0] = beatic_irq_mask_enable[off + 0] & beatic_irq_mask_ack[off + 0]; masks[1] = beatic_irq_mask_enable[off + 1] & beatic_irq_mask_ack[off + 1]; masks[2] = beatic_irq_mask_enable[off + 2] & beatic_irq_mask_ack[off + 2]; masks[3] = beatic_irq_mask_enable[off + 3] & beatic_irq_mask_ack[off + 3]; if (beat_set_interrupt_mask(irq_plug&~255UL, masks[0], masks[1], masks[2], masks[3]) != 0) panic("Failed to set mask IRQ!"); } static void beatic_mask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_unmask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64)); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_ack_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_end_irq(struct irq_data *d) { s64 err; unsigned long flags; err = beat_downcount_of_interrupt(d->irq); if (err != 0) { if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ panic("Failed to downcount IRQ! Error = %16llx", err); printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq); } raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64)); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static struct irq_chip beatic_pic = { .name = "CELL-BEAT", .irq_unmask = beatic_unmask_irq, .irq_mask = beatic_mask_irq, .irq_eoi = beatic_end_irq, }; /* * Dispose binding hardware IRQ number (hw) and Virtual IRQ number (virq), * update flags.
* * Note that the number (virq) is already assigned at upper layer. */ static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq) { beat_destruct_irq_plug(virq); } /* * Create or update binding hardware IRQ number (hw) and Virtual * IRQ number (virq). This is called only once for a given mapping. * * Note that the number (virq) is already assigned at upper layer. */ static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { int64_t err; err = beat_construct_and_connect_irq_plug(virq, hw); if (err < 0) return -EIO; irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); return 0; } /* * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), * to pass away to irq_create_mapping(). * * Called from irq_create_of_mapping() only. * Note: We have only 1 entry to translate. */ static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { const u64 *intspec2 = (const u64 *)intspec; *out_hwirq = *intspec2; *out_flags |= IRQ_TYPE_LEVEL_LOW; return 0; } static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np) { /* Match all */ return 1; } static const struct irq_domain_ops beatic_pic_host_ops = { .map = beatic_pic_host_map, .unmap = beatic_pic_host_unmap, .xlate = beatic_pic_host_xlate, .match = beatic_pic_host_match, }; /* * Get an IRQ number * Note: returns VIRQ */ static inline unsigned int beatic_get_irq_plug(void) { int i; uint64_t pending[4], ub; for (i = 0; i < MAX_IRQS; i += 256) { beat_detect_pending_interrupts(i, pending); __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[0] & beatic_irq_mask_enable[i/64+0] & beatic_irq_mask_ack[i/64+0])); if (ub != 64) return i + ub + 0; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[1] & beatic_irq_mask_enable[i/64+1] & beatic_irq_mask_ack[i/64+1])); if (ub != 64) return i + ub + 64; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[2] & beatic_irq_mask_enable[i/64+2] & beatic_irq_mask_ack[i/64+2])); if (ub != 64) return i + ub + 128; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[3] & beatic_irq_mask_enable[i/64+3] & beatic_irq_mask_ack[i/64+3])); if (ub != 64) return i + ub + 192; } return NO_IRQ; } unsigned int beatic_get_irq(void) { unsigned int ret; ret = beatic_get_irq_plug(); if (ret != NO_IRQ) beatic_ack_irq(irq_get_irq_data(ret)); return ret; } /* */ void __init beatic_init_IRQ(void) { int i; memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable)); memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack)); for (i = 0; i < MAX_IRQS; i += 256) beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L); /* Set our get_irq function */ ppc_md.get_irq = beatic_get_irq; /* Allocate an irq host */ beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL); BUG_ON(beatic_host == NULL); irq_set_default_host(beatic_host); } void beatic_deinit_IRQ(void) { int i; for (i = 1; i < nr_irqs; i++) beat_destruct_irq_plug(i); }
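/*
 * Illustrative sketch, not part of this file's interface: how an IRQ
 * plug number maps onto the mask words consulted by
 * beatic_update_irq_mask() above. Plugs are grouped into banks of 256,
 * each bank spanning four 64-bit words, with bits numbered MSB-first
 * inside a word (plug 0 is bit 63 of word 0). The helper name is
 * hypothetical and exists only as an example.
 */
static inline void
beatic_example_plug_to_mask(unsigned int plug, unsigned int *word,
			    uint64_t *bit)
{
	*word = plug / 64;                 /* index into beatic_irq_mask_*[] */
	*bit = 1ULL << (63 - (plug % 64)); /* MSB-first bit within that word */
}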
gpl-2.0
dl12345/kernel_sony_kitakami
drivers/infiniband/hw/ipath/ipath_sdma.c
12038
24081
/* * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/spinlock.h> #include <linux/gfp.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" #define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */ static void vl15_watchdog_enq(struct ipath_devdata *dd) { /* ipath_sdma_lock must already be held */ if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) { unsigned long interval = (HZ + 19) / 20; dd->ipath_sdma_vl15_timer.expires = jiffies + interval; add_timer(&dd->ipath_sdma_vl15_timer); } } static void vl15_watchdog_deq(struct ipath_devdata *dd) { /* ipath_sdma_lock must already be held */ if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) { unsigned long interval = (HZ + 19) / 20; mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval); } else { del_timer(&dd->ipath_sdma_vl15_timer); } } static void vl15_watchdog_timeout(unsigned long opaque) { struct ipath_devdata *dd = (struct ipath_devdata *)opaque; if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) { ipath_dbg("vl15 watchdog timeout - clearing\n"); ipath_cancel_sends(dd, 1); ipath_hol_down(dd); } else { ipath_dbg("vl15 watchdog timeout - " "condition already cleared\n"); } } static void unmap_desc(struct ipath_devdata *dd, unsigned head) { __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0]; u64 desc[2]; dma_addr_t addr; size_t len; desc[0] = le64_to_cpu(descqp[0]); desc[1] = le64_to_cpu(descqp[1]); addr = (desc[1] << 32) | (desc[0] >> 32); len = (desc[0] >> 14) & (0x7ffULL << 2); dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE); } /* * ipath_sdma_lock should be locked before calling this. */ int ipath_sdma_make_progress(struct ipath_devdata *dd) { struct list_head *lp = NULL; struct ipath_sdma_txreq *txp = NULL; u16 dmahead; u16 start_idx = 0; int progress = 0; if (!list_empty(&dd->ipath_sdma_activelist)) { lp = dd->ipath_sdma_activelist.next; txp = list_entry(lp, struct ipath_sdma_txreq, list); start_idx = txp->start_idx; } /* * Read the SDMA head register in order to know that the * interrupt clear has been written to the chip. * Otherwise, we may not get an interrupt for the last * descriptor in the queue. 
*/ dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead); /* sanity check return value for error handling (chip reset, etc.) */ if (dmahead >= dd->ipath_sdma_descq_cnt) goto done; while (dd->ipath_sdma_descq_head != dmahead) { if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC && dd->ipath_sdma_descq_head == start_idx) { unmap_desc(dd, dd->ipath_sdma_descq_head); start_idx++; if (start_idx == dd->ipath_sdma_descq_cnt) start_idx = 0; } /* increment free count and head */ dd->ipath_sdma_descq_removed++; if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt) dd->ipath_sdma_descq_head = 0; if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) { /* move to notify list */ if (txp->flags & IPATH_SDMA_TXREQ_F_VL15) vl15_watchdog_deq(dd); list_move_tail(lp, &dd->ipath_sdma_notifylist); if (!list_empty(&dd->ipath_sdma_activelist)) { lp = dd->ipath_sdma_activelist.next; txp = list_entry(lp, struct ipath_sdma_txreq, list); start_idx = txp->start_idx; } else { lp = NULL; txp = NULL; } } progress = 1; } if (progress) tasklet_hi_schedule(&dd->ipath_sdma_notify_task); done: return progress; } static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list) { struct ipath_sdma_txreq *txp, *txp_next; list_for_each_entry_safe(txp, txp_next, list, list) { list_del_init(&txp->list); if (txp->callback) (*txp->callback)(txp->callback_cookie, txp->callback_status); } } static void sdma_notify_taskbody(struct ipath_devdata *dd) { unsigned long flags; struct list_head list; INIT_LIST_HEAD(&list); spin_lock_irqsave(&dd->ipath_sdma_lock, flags); list_splice_init(&dd->ipath_sdma_notifylist, &list); spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); ipath_sdma_notify(dd, &list); /* * The IB verbs layer needs to see the callback before getting * the call to ipath_ib_piobufavail() because the callback * handles releasing resources the next send will need. * Otherwise, we could do these calls in * ipath_sdma_make_progress(). 
*/ ipath_ib_piobufavail(dd->verbs_dev); } static void sdma_notify_task(unsigned long opaque) { struct ipath_devdata *dd = (struct ipath_devdata *)opaque; if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) sdma_notify_taskbody(dd); } static void dump_sdma_state(struct ipath_devdata *dd) { unsigned long reg; reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus); ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg); reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl); ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg); reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0); ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg); reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1); ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg); reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2); ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg); reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail); ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg); reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead); ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg); } static void sdma_abort_task(unsigned long opaque) { struct ipath_devdata *dd = (struct ipath_devdata *) opaque; u64 status; unsigned long flags; if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) return; spin_lock_irqsave(&dd->ipath_sdma_lock, flags); status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK; /* nothing to do */ if (status == IPATH_SDMA_ABORT_NONE) goto unlock; /* ipath_sdma_abort() is done, waiting for interrupt */ if (status == IPATH_SDMA_ABORT_DISARMED) { if (jiffies < dd->ipath_sdma_abort_intr_timeout) goto resched_noprint; /* give up, intr got lost somewhere */ ipath_dbg("give up waiting for SDMADISABLED intr\n"); __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status); status = IPATH_SDMA_ABORT_ABORTED; } /* everything is stopped, time to clean up and restart */ if (status == IPATH_SDMA_ABORT_ABORTED) { struct ipath_sdma_txreq *txp, *txpnext; u64 hwstatus; int notify = 0; hwstatus = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus); if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG | IPATH_SDMA_STATUS_ABORT_IN_PROG | IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) || !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) { if (dd->ipath_sdma_reset_wait > 0) { /* not done shutting down sdma */ --dd->ipath_sdma_reset_wait; goto resched; } ipath_cdbg(VERBOSE, "gave up waiting for quiescent " "status after SDMA reset, continuing\n"); dump_sdma_state(dd); } /* dequeue all "sent" requests */ list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist, list) { txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED; if (txp->flags & IPATH_SDMA_TXREQ_F_VL15) vl15_watchdog_deq(dd); list_move_tail(&txp->list, &dd->ipath_sdma_notifylist); notify = 1; } if (notify) tasklet_hi_schedule(&dd->ipath_sdma_notify_task); /* reset our notion of head and tail */ dd->ipath_sdma_descq_tail = 0; dd->ipath_sdma_descq_head = 0; dd->ipath_sdma_head_dma[0] = 0; dd->ipath_sdma_generation = 0; dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added; /* Reset SendDmaLenGen */ ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18)); /* done with sdma state for a bit */ spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); /* * Don't restart sdma here (with the exception * below). Wait until link is up to ACTIVE. 
VL15 MADs * used to bring the link up use PIO, and multiple link * transitions otherwise cause the sdma engine to be * stopped and started multiple times. * The disable is done here, including the shadow, * so the state is kept consistent. * See ipath_restart_sdma() for the actual starting * of sdma. */ spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); /* make sure I see next message */ dd->ipath_sdma_abort_jiffies = 0; /* * Not everything that takes SDMA offline is a link * status change. If the link was up, restart SDMA. */ if (dd->ipath_flags & IPATH_LINKACTIVE) ipath_restart_sdma(dd); goto done; } resched: /* * for now, keep spinning * JAG - this is bad to just have default be a loop without * state change */ if (jiffies > dd->ipath_sdma_abort_jiffies) { ipath_dbg("looping with status 0x%08lx\n", dd->ipath_sdma_status); dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ; } resched_noprint: spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) tasklet_hi_schedule(&dd->ipath_sdma_abort_task); return; unlock: spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); done: return; } /* * This is called from interrupt context. */ void ipath_sdma_intr(struct ipath_devdata *dd) { unsigned long flags; spin_lock_irqsave(&dd->ipath_sdma_lock, flags); (void) ipath_sdma_make_progress(dd); spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); } static int alloc_sdma(struct ipath_devdata *dd) { int ret = 0; /* Allocate memory for SendDMA descriptor FIFO */ dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL); if (!dd->ipath_sdma_descq) { ipath_dev_err(dd, "failed to allocate SendDMA descriptor " "FIFO memory\n"); ret = -ENOMEM; goto done; } dd->ipath_sdma_descq_cnt = SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc); /* Allocate memory for DMA of head register to memory */ dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev, PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL); if (!dd->ipath_sdma_head_dma) { ipath_dev_err(dd, "failed to allocate SendDMA head memory\n"); ret = -ENOMEM; goto cleanup_descq; } dd->ipath_sdma_head_dma[0] = 0; init_timer(&dd->ipath_sdma_vl15_timer); dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout; dd->ipath_sdma_vl15_timer.data = (unsigned long)dd; atomic_set(&dd->ipath_sdma_vl15_count, 0); goto done; cleanup_descq: dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ, (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys); dd->ipath_sdma_descq = NULL; dd->ipath_sdma_descq_phys = 0; done: return ret; } int setup_sdma(struct ipath_devdata *dd) { int ret = 0; unsigned i, n; u64 tmp64; u64 senddmabufmask[3] = { 0 }; unsigned long flags; ret = alloc_sdma(dd); if (ret) goto done; if (!dd->ipath_sdma_descq) { ipath_dev_err(dd, "SendDMA memory not allocated\n"); goto done; } /* * Set initial status as if we had been up, then gone down. * This lets initial start on transition to ACTIVE be the * same as restart after link flap. 
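* (IPATH_SDMA_ABORT_ABORTED is exactly the state the abort path above leaves behind, so the restart logic is exercised identically in both cases.)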
*/ dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED; dd->ipath_sdma_abort_jiffies = 0; dd->ipath_sdma_generation = 0; dd->ipath_sdma_descq_tail = 0; dd->ipath_sdma_descq_head = 0; dd->ipath_sdma_descq_removed = 0; dd->ipath_sdma_descq_added = 0; /* Set SendDmaBase */ ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, dd->ipath_sdma_descq_phys); /* Set SendDmaLenGen */ tmp64 = dd->ipath_sdma_descq_cnt; tmp64 |= 1<<18; /* enable generation checking */ ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64); /* Set SendDmaTail */ ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, dd->ipath_sdma_descq_tail); /* Set SendDmaHeadAddr */ ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, dd->ipath_sdma_head_phys); /* * Reserve all the former "kernel" piobufs, using high number range * so we get as many 4K buffers as possible */ n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k; i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved; ipath_chg_pioavailkernel(dd, i, n - i , 0); for (; i < n; ++i) { unsigned word = i / 64; unsigned bit = i & 63; BUG_ON(word >= 3); senddmabufmask[word] |= 1ULL << bit; } ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, senddmabufmask[0]); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, senddmabufmask[1]); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, senddmabufmask[2]); INIT_LIST_HEAD(&dd->ipath_sdma_activelist); INIT_LIST_HEAD(&dd->ipath_sdma_notifylist); tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task, (unsigned long) dd); tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task, (unsigned long) dd); /* * No use to turn on SDMA here, as link is probably not ACTIVE * Just mark it RUNNING and enable the interrupt, and let the * ipath_restart_sdma() on link transition to ACTIVE actually * enable it. 
*/ spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); done: return ret; } void teardown_sdma(struct ipath_devdata *dd) { struct ipath_sdma_txreq *txp, *txpnext; unsigned long flags; dma_addr_t sdma_head_phys = 0; dma_addr_t sdma_descq_phys = 0; void *sdma_descq = NULL; void *sdma_head_dma = NULL; spin_lock_irqsave(&dd->ipath_sdma_lock, flags); __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status); __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status); __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status); spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); tasklet_kill(&dd->ipath_sdma_abort_task); tasklet_kill(&dd->ipath_sdma_notify_task); /* turn off sdma */ spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); spin_lock_irqsave(&dd->ipath_sdma_lock, flags); /* dequeue all "sent" requests */ list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist, list) { txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN; if (txp->flags & IPATH_SDMA_TXREQ_F_VL15) vl15_watchdog_deq(dd); list_move_tail(&txp->list, &dd->ipath_sdma_notifylist); } spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); sdma_notify_taskbody(dd); del_timer_sync(&dd->ipath_sdma_vl15_timer); spin_lock_irqsave(&dd->ipath_sdma_lock, flags); dd->ipath_sdma_abort_jiffies = 0; ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0); if (dd->ipath_sdma_head_dma) { sdma_head_dma = (void *) dd->ipath_sdma_head_dma; sdma_head_phys = dd->ipath_sdma_head_phys; dd->ipath_sdma_head_dma = NULL; dd->ipath_sdma_head_phys = 0; } if (dd->ipath_sdma_descq) { sdma_descq = dd->ipath_sdma_descq; sdma_descq_phys = dd->ipath_sdma_descq_phys; dd->ipath_sdma_descq = NULL; dd->ipath_sdma_descq_phys = 0; } spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); if (sdma_head_dma) dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, sdma_head_dma, sdma_head_phys); if (sdma_descq) dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ, sdma_descq, sdma_descq_phys); } /* * [Re]start SDMA, if we use it, and it's not already OK. * This is called on transition to link ACTIVE, either the first or * subsequent times. 
*/ void ipath_restart_sdma(struct ipath_devdata *dd) { unsigned long flags; int needed = 1; if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) goto bail; /* * First, make sure we should, which is to say, * check that we are "RUNNING" (not in teardown) * and not "SHUTDOWN" */ spin_lock_irqsave(&dd->ipath_sdma_lock, flags); if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status) || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status)) needed = 0; else { __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status); __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status); __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status); } spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); if (!needed) { ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n", dd->ipath_sdma_status); goto bail; } spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); /* * First clear, just to be safe. Enable is only done * in chip on 0->1 transition */ dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags); /* notify upper layers */ ipath_ib_piobufavail(dd->verbs_dev); bail: return; } static inline void make_sdma_desc(struct ipath_devdata *dd, u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset) { WARN_ON(addr & 3); /* SDmaPhyAddr[47:32] */ sdmadesc[1] = addr >> 32; /* SDmaPhyAddr[31:0] */ sdmadesc[0] = (addr & 0xfffffffcULL) << 32; /* SDmaGeneration[1:0] */ sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30; /* SDmaDwordCount[10:0] */ sdmadesc[0] |= (dwlen & 0x7ffULL) << 16; /* SDmaBufOffset[12:2] */ sdmadesc[0] |= dwoffset & 0x7ffULL; } /* * This function queues one IB packet onto the send DMA queue per call. * The caller is responsible for checking: * 1) The number of send DMA descriptor entries is less than the size of * the descriptor queue. * 2) The IB SGE addresses and lengths are 32-bit aligned * (except possibly the last SGE's length) * 3) The SGE addresses are suitable for passing to dma_map_single(). 
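* The body below additionally guards constraint 1) at submit time by comparing tx->txreq.sg_count against ipath_sdma_descq_freecnt(dd), retrying after ipath_sdma_make_progress() reclaims completed descriptors.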
*/ int ipath_sdma_verbs_send(struct ipath_devdata *dd, struct ipath_sge_state *ss, u32 dwords, struct ipath_verbs_txreq *tx) { unsigned long flags; struct ipath_sge *sge; int ret = 0; u16 tail; __le64 *descqp; u64 sdmadesc[2]; u32 dwoffset; dma_addr_t addr; if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) { ipath_dbg("packet size %X > ibmax %X, fail\n", tx->map_len + (dwords<<2), dd->ipath_ibmaxlen); ret = -EMSGSIZE; goto fail; } spin_lock_irqsave(&dd->ipath_sdma_lock, flags); retry: if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) { ret = -EBUSY; goto unlock; } if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) { if (ipath_sdma_make_progress(dd)) goto retry; ret = -ENOBUFS; goto unlock; } addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, tx->map_len, DMA_TO_DEVICE); if (dma_mapping_error(&dd->pcidev->dev, addr)) goto ioerr; dwoffset = tx->map_len >> 2; make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0); /* SDmaFirstDesc */ sdmadesc[0] |= 1ULL << 12; if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */ /* write to the descq */ tail = dd->ipath_sdma_descq_tail; descqp = &dd->ipath_sdma_descq[tail].qw[0]; *descqp++ = cpu_to_le64(sdmadesc[0]); *descqp++ = cpu_to_le64(sdmadesc[1]); if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC) tx->txreq.start_idx = tail; /* increment the tail */ if (++tail == dd->ipath_sdma_descq_cnt) { tail = 0; descqp = &dd->ipath_sdma_descq[0].qw[0]; ++dd->ipath_sdma_generation; } sge = &ss->sge; while (dwords) { u32 dw; u32 len; len = dwords << 2; if (len > sge->length) len = sge->length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); dw = (len + 3) >> 2; addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2, DMA_TO_DEVICE); if (dma_mapping_error(&dd->pcidev->dev, addr)) goto unmap; make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset); /* SDmaUseLargeBuf has to be set in every descriptor */ if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) sdmadesc[0] |= 1ULL << 14; /* write to the descq */ *descqp++ = cpu_to_le64(sdmadesc[0]); *descqp++ = cpu_to_le64(sdmadesc[1]); /* increment the tail */ if (++tail == dd->ipath_sdma_descq_cnt) { tail = 0; descqp = &dd->ipath_sdma_descq[0].qw[0]; ++dd->ipath_sdma_generation; } sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr != NULL) { if (++sge->n >= IPATH_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } dwoffset += dw; dwords -= dw; } if (!tail) descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0]; descqp -= 2; /* SDmaLastDesc */ descqp[0] |= cpu_to_le64(1ULL << 11); if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) { /* SDmaIntReq */ descqp[0] |= cpu_to_le64(1ULL << 15); } /* Commit writes to memory and advance the tail on the chip */ wmb(); ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail); tx->txreq.next_descq_idx = tail; tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK; dd->ipath_sdma_descq_tail = tail; dd->ipath_sdma_descq_added += tx->txreq.sg_count; list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15) vl15_watchdog_enq(dd); goto unlock; unmap: while (tail != dd->ipath_sdma_descq_tail) { if (!tail) tail = dd->ipath_sdma_descq_cnt - 1; else tail--; unmap_desc(dd, tail); } ioerr: ret = 
-EIO; unlock: spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); fail: return ret; }
gpl-2.0
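The make_sdma_desc() helper in the ipath driver above packs a DMA address, a 2-bit generation counter, an 11-bit dword count, and a buffer offset into two little-endian 64-bit descriptor words. The standalone sketch below mirrors that bit layout outside the kernel so the field positions are easier to verify; the function name and demo values are hypothetical, not part of the driver.

#include <stdint.h>
#include <stdio.h>

static void pack_sdma_desc(uint64_t desc[2], uint64_t addr,
                           unsigned gen, unsigned dwlen, unsigned dwoffset)
{
        /* SDmaPhyAddr[47:32] goes in the second qword */
        desc[1] = addr >> 32;
        /* SDmaPhyAddr[31:0], dword-aligned, in the top half of qword 0 */
        desc[0] = (addr & 0xfffffffcULL) << 32;
        /* SDmaGeneration[1:0] */
        desc[0] |= ((uint64_t)gen & 3) << 30;
        /* SDmaDwordCount[10:0] */
        desc[0] |= ((uint64_t)dwlen & 0x7ff) << 16;
        /* SDmaBufOffset[12:2] */
        desc[0] |= (uint64_t)dwoffset & 0x7ff;
}

int main(void)
{
        uint64_t d[2];

        pack_sdma_desc(d, 0x12345678ULL, 1, 64, 0); /* hypothetical values */
        printf("qw0=%016llx qw1=%016llx\n",
               (unsigned long long)d[0], (unsigned long long)d[1]);
        return 0;
}

Note how the low two address bits are masked off before shifting: the hardware expects dword-aligned buffers, which is why the driver guards make_sdma_desc() with WARN_ON(addr & 3).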
rhvgoyal/systemd
src/login/test-login-shared.c
7
1246
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ /*** This file is part of systemd. Copyright 2013 Zbigniew Jędrzejewski-Szmek systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include "login-util.h" #include "macro.h" static void test_session_id_valid(void) { assert_se(session_id_valid("c1")); assert_se(session_id_valid("1234")); assert_se(!session_id_valid("1-2")); assert_se(!session_id_valid("")); assert_se(!session_id_valid("\tid")); } int main(int argc, char* argv[]) { log_parse_environment(); log_open(); test_session_id_valid(); return 0; }
gpl-2.0
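The systemd test above pins down session_id_valid() only by example: plain alphanumeric IDs pass, while the empty string and IDs containing '-' or whitespace fail. A minimal validator consistent with those assertions could look like the sketch below; this is an assumption inferred from the test, not the actual login-util.h implementation.

#include <assert.h>
#include <ctype.h>
#include <stdbool.h>

static bool session_id_valid_sketch(const char *id)
{
        if (!id || !*id)
                return false;                 /* reject NULL and "" */
        for (const char *p = id; *p; p++)
                if (!isalnum((unsigned char)*p))
                        return false;         /* reject '-', '\t', etc. */
        return true;
}

int main(void)
{
        /* the same cases the test above asserts */
        assert(session_id_valid_sketch("c1"));
        assert(session_id_valid_sketch("1234"));
        assert(!session_id_valid_sketch("1-2"));
        assert(!session_id_valid_sketch(""));
        assert(!session_id_valid_sketch("\tid"));
        return 0;
}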
shizhai/wprobe
build_dir/target-mips_r2_uClibc-0.9.33.2/cyassl-1.6.5/ctaocrypt/src/sha512.c
7
8232
/* sha512.c * * Copyright (C) 2006-2010 Sawtooth Consulting Ltd. * * This file is part of CyaSSL. * * CyaSSL is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * CyaSSL is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #ifdef CYASSL_SHA512 #include "sha512.h" #ifdef NO_INLINE #include "misc.h" #else #include "misc.c" #endif #include <string.h> #include <assert.h> #ifndef min static INLINE word32 min(word32 a, word32 b) { return a > b ? b : a; } #endif /* min */ void InitSha512(Sha512* sha512) { sha512->digest[0] = W64LIT(0x6a09e667f3bcc908); sha512->digest[1] = W64LIT(0xbb67ae8584caa73b); sha512->digest[2] = W64LIT(0x3c6ef372fe94f82b); sha512->digest[3] = W64LIT(0xa54ff53a5f1d36f1); sha512->digest[4] = W64LIT(0x510e527fade682d1); sha512->digest[5] = W64LIT(0x9b05688c2b3e6c1f); sha512->digest[6] = W64LIT(0x1f83d9abfb41bd6b); sha512->digest[7] = W64LIT(0x5be0cd19137e2179); sha512->buffLen = 0; sha512->loLen = 0; sha512->hiLen = 0; } static const word64 K512[80] = { W64LIT(0x428a2f98d728ae22), W64LIT(0x7137449123ef65cd), W64LIT(0xb5c0fbcfec4d3b2f), W64LIT(0xe9b5dba58189dbbc), W64LIT(0x3956c25bf348b538), W64LIT(0x59f111f1b605d019), W64LIT(0x923f82a4af194f9b), W64LIT(0xab1c5ed5da6d8118), W64LIT(0xd807aa98a3030242), W64LIT(0x12835b0145706fbe), W64LIT(0x243185be4ee4b28c), W64LIT(0x550c7dc3d5ffb4e2), W64LIT(0x72be5d74f27b896f), W64LIT(0x80deb1fe3b1696b1), W64LIT(0x9bdc06a725c71235), W64LIT(0xc19bf174cf692694), W64LIT(0xe49b69c19ef14ad2), W64LIT(0xefbe4786384f25e3), W64LIT(0x0fc19dc68b8cd5b5), W64LIT(0x240ca1cc77ac9c65), W64LIT(0x2de92c6f592b0275), W64LIT(0x4a7484aa6ea6e483), W64LIT(0x5cb0a9dcbd41fbd4), W64LIT(0x76f988da831153b5), W64LIT(0x983e5152ee66dfab), W64LIT(0xa831c66d2db43210), W64LIT(0xb00327c898fb213f), W64LIT(0xbf597fc7beef0ee4), W64LIT(0xc6e00bf33da88fc2), W64LIT(0xd5a79147930aa725), W64LIT(0x06ca6351e003826f), W64LIT(0x142929670a0e6e70), W64LIT(0x27b70a8546d22ffc), W64LIT(0x2e1b21385c26c926), W64LIT(0x4d2c6dfc5ac42aed), W64LIT(0x53380d139d95b3df), W64LIT(0x650a73548baf63de), W64LIT(0x766a0abb3c77b2a8), W64LIT(0x81c2c92e47edaee6), W64LIT(0x92722c851482353b), W64LIT(0xa2bfe8a14cf10364), W64LIT(0xa81a664bbc423001), W64LIT(0xc24b8b70d0f89791), W64LIT(0xc76c51a30654be30), W64LIT(0xd192e819d6ef5218), W64LIT(0xd69906245565a910), W64LIT(0xf40e35855771202a), W64LIT(0x106aa07032bbd1b8), W64LIT(0x19a4c116b8d2d0c8), W64LIT(0x1e376c085141ab53), W64LIT(0x2748774cdf8eeb99), W64LIT(0x34b0bcb5e19b48a8), W64LIT(0x391c0cb3c5c95a63), W64LIT(0x4ed8aa4ae3418acb), W64LIT(0x5b9cca4f7763e373), W64LIT(0x682e6ff3d6b2b8a3), W64LIT(0x748f82ee5defb2fc), W64LIT(0x78a5636f43172f60), W64LIT(0x84c87814a1f0ab72), W64LIT(0x8cc702081a6439ec), W64LIT(0x90befffa23631e28), W64LIT(0xa4506cebde82bde9), W64LIT(0xbef9a3f7b2c67915), W64LIT(0xc67178f2e372532b), W64LIT(0xca273eceea26619c), W64LIT(0xd186b8c721c0c207), W64LIT(0xeada7dd6cde0eb1e), W64LIT(0xf57d4f7fee6ed178), W64LIT(0x06f067aa72176fba), W64LIT(0x0a637dc5a2c898a6), W64LIT(0x113f9804bef90dae), 
W64LIT(0x1b710b35131c471b), W64LIT(0x28db77f523047d84), W64LIT(0x32caab7b40c72493), W64LIT(0x3c9ebe0a15c9bebc), W64LIT(0x431d67c49c100d4c), W64LIT(0x4cc5d4becb3e42b6), W64LIT(0x597f299cfc657e2a), W64LIT(0x5fcb6fab3ad6faec), W64LIT(0x6c44198c4a475817) }; #define blk0(i) (W[i] = sha512->buffer[i]) #define blk2(i) (W[i&15]+=s1(W[(i-2)&15])+W[(i-7)&15]+s0(W[(i-15)&15])) #define Ch(x,y,z) (z^(x&(y^z))) #define Maj(x,y,z) ((x&y)|(z&(x|y))) #define a(i) T[(0-i)&7] #define b(i) T[(1-i)&7] #define c(i) T[(2-i)&7] #define d(i) T[(3-i)&7] #define e(i) T[(4-i)&7] #define f(i) T[(5-i)&7] #define g(i) T[(6-i)&7] #define h(i) T[(7-i)&7] #define S0(x) (rotrFixed64(x,28)^rotrFixed64(x,34)^rotrFixed64(x,39)) #define S1(x) (rotrFixed64(x,14)^rotrFixed64(x,18)^rotrFixed64(x,41)) #define s0(x) (rotrFixed64(x,1)^rotrFixed64(x,8)^(x>>7)) #define s1(x) (rotrFixed64(x,19)^rotrFixed64(x,61)^(x>>6)) #define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+K[i+j]+(j?blk2(i):blk0(i));\ d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i)) static void Transform(Sha512* sha512) { const word64* K = K512; word32 j; word64 W[16]; word64 T[8]; /* Copy digest to working vars */ memcpy(T, sha512->digest, sizeof(T)); /* 64 operations, partially loop unrolled */ for (j = 0; j < 80; j += 16) { R( 0); R( 1); R( 2); R( 3); R( 4); R( 5); R( 6); R( 7); R( 8); R( 9); R(10); R(11); R(12); R(13); R(14); R(15); } /* Add the working vars back into digest */ sha512->digest[0] += a(0); sha512->digest[1] += b(0); sha512->digest[2] += c(0); sha512->digest[3] += d(0); sha512->digest[4] += e(0); sha512->digest[5] += f(0); sha512->digest[6] += g(0); sha512->digest[7] += h(0); /* Wipe variables */ memset(W, 0, sizeof(W)); memset(T, 0, sizeof(T)); } static INLINE void AddLength(Sha512* sha512, word32 len) { word32 tmp = sha512->loLen; if ( (sha512->loLen += len) < tmp) sha512->hiLen++; /* carry low to high */ } void Sha512Update(Sha512* sha512, const byte* data, word32 len) { /* do block size increments */ byte* local = (byte*)sha512->buffer; while (len) { word32 add = min(len, SHA512_BLOCK_SIZE - sha512->buffLen); memcpy(&local[sha512->buffLen], data, add); sha512->buffLen += add; data += add; len -= add; if (sha512->buffLen == SHA512_BLOCK_SIZE) { #ifdef LITTLE_ENDIAN_ORDER ByteReverseWords64(sha512->buffer, sha512->buffer, SHA512_BLOCK_SIZE); #endif Transform(sha512); AddLength(sha512, SHA512_BLOCK_SIZE); sha512->buffLen = 0; } } } void Sha512Final(Sha512* sha512, byte* hash) { byte* local = (byte*)sha512->buffer; AddLength(sha512, sha512->buffLen); /* before adding pads */ local[sha512->buffLen++] = 0x80; /* add 1 */ /* pad with zeros */ if (sha512->buffLen > SHA512_PAD_SIZE) { memset(&local[sha512->buffLen], 0, SHA512_BLOCK_SIZE - sha512->buffLen); sha512->buffLen += SHA512_BLOCK_SIZE - sha512->buffLen; #ifdef LITTLE_ENDIAN_ORDER ByteReverseWords64(sha512->buffer,sha512->buffer,SHA512_BLOCK_SIZE); #endif Transform(sha512); sha512->buffLen = 0; } memset(&local[sha512->buffLen], 0, SHA512_PAD_SIZE - sha512->buffLen); /* put lengths in bits */ sha512->loLen = sha512->loLen << 3; sha512->hiLen = (sha512->loLen >> (8*sizeof(sha512->loLen) - 3)) + (sha512->hiLen << 3); /* store lengths */ #ifdef LITTLE_ENDIAN_ORDER ByteReverseWords64(sha512->buffer, sha512->buffer, SHA512_PAD_SIZE); #endif /* ! length ordering dependent on digest endian type ! 
*/ sha512->buffer[SHA512_BLOCK_SIZE / sizeof(word64) - 2] = sha512->hiLen; sha512->buffer[SHA512_BLOCK_SIZE / sizeof(word64) - 1] = sha512->loLen; Transform(sha512); #ifdef LITTLE_ENDIAN_ORDER ByteReverseWords64(sha512->digest, sha512->digest, SHA512_DIGEST_SIZE); #endif memcpy(hash, sha512->digest, SHA512_DIGEST_SIZE); InitSha512(sha512); /* reset state */ } #endif /* CYASSL_SHA512 */
gpl-2.0
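The CyaSSL SHA-512 code above exposes a conventional init/update/final hashing interface. A minimal usage sketch, assuming the sha512.h header from the same ctaocrypt tree (with its byte typedef and 64-byte SHA512_DIGEST_SIZE) is on the include path and CYASSL_SHA512 is defined:

#include <stdio.h>
#include "sha512.h"   /* assumed available from the same ctaocrypt tree */

int main(void)
{
        Sha512 ctx;
        byte digest[SHA512_DIGEST_SIZE];
        int i;

        InitSha512(&ctx);
        Sha512Update(&ctx, (const byte *)"abc", 3);
        Sha512Final(&ctx, digest);   /* also resets ctx for reuse */

        /* print the digest as lowercase hex */
        for (i = 0; i < SHA512_DIGEST_SIZE; i++)
                printf("%02x", digest[i]);
        printf("\n");
        return 0;
}

Note that Sha512Final() calls InitSha512() before returning, so the same context can hash the next message without an explicit re-init.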
zarboz/xbmc
xbmc/pvrclients/vdr-vnsi/vdr-plugin-vnsiserver/demuxer_MPEGVideo.c
7
9808
/* * vdr-plugin-vnsi - XBMC server plugin for VDR * * Copyright (C) 2010 Alwin Esch (Team XBMC) * * http://www.xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * http://www.gnu.org/copyleft/gpl.html * */ #include <stdlib.h> #include <assert.h> #include "config.h" #include "bitstream.h" #include "receiver.h" #include "demuxer_MPEGVideo.h" using namespace std; #define MPEG_PICTURE_START 0x00000100 #define MPEG_SEQUENCE_START 0x000001b3 #define MPEG_SEQUENCE_EXTENSION 0x000001b5 #define MPEG_SLICE_S 0x00000101 #define MPEG_SLICE_E 0x000001af /** * MPEG2VIDEO frame duration table (in 90kHz clock domain) */ const unsigned int mpeg2video_framedurations[16] = { 0, 3753, 3750, 3600, 3003, 3000, 1800, 1501, 1500, }; cParserMPEG2Video::cParserMPEG2Video(cTSDemuxer *demuxer, cLiveStreamer *streamer, int streamID) : cParser(streamer, streamID) { m_pictureBuffer = NULL; m_pictureBufferSize = 0; m_pictureBufferPtr = 0; m_StartCond = 0; m_StartCode = 0; m_StartCodeOffset = 0; m_FrameDuration = 0; m_vbvDelay = -1; m_vbvSize = 0; m_Height = 0; m_Width = 0; m_StreamPacket = NULL; m_demuxer = demuxer; } cParserMPEG2Video::~cParserMPEG2Video() { while (!m_PTSQueue.empty()) { sStreamPacket* pkt = m_PTSQueue.front(); m_PTSQueue.pop_front(); free(pkt->data); delete pkt; } while (!m_DurationQueue.empty()) { sStreamPacket* pkt = m_DurationQueue.front(); m_DurationQueue.pop_front(); free(pkt->data); delete pkt; } if (m_pictureBuffer) { free(m_pictureBuffer); m_pictureBuffer = NULL; } } void cParserMPEG2Video::Parse(unsigned char *data, int size, bool pusi) { uint32_t startcode = m_StartCond; /* Parse PES header here for MPEG PS streams like from pvrinput */ if (pusi && m_Streamer->IsMPEGPS()) { int hlen; hlen = ParsePESHeader(data, size); #if 0 int i; for(i = 0; i < 16; i++) printf("%02x.", data[i]); printf(" %d\n", hlen); #endif data += hlen; size -= hlen; if(size < 1) return; } if (m_pictureBuffer == NULL) { m_pictureBufferSize = 4000; m_pictureBuffer = (uint8_t*)malloc(m_pictureBufferSize); } if (m_pictureBufferPtr + size + 4 >= m_pictureBufferSize) { m_pictureBufferSize += size * 4; m_pictureBuffer = (uint8_t*)realloc(m_pictureBuffer, m_pictureBufferSize); } for (int i = 0; i < size; i++) { if (!m_pictureBuffer) break; m_pictureBuffer[m_pictureBufferPtr++] = data[i]; startcode = startcode << 8 | data[i]; if ((startcode & 0xffffff00) != 0x00000100) continue; bool reset = true; if (m_pictureBufferPtr - 4 > 0 && m_StartCode != 0) { reset = Parse_MPEG2Video(m_pictureBufferPtr - 4, startcode, m_StartCodeOffset); } if (reset) { /* Reset packet parser upon length error or if parser tells us so */ m_pictureBufferPtr = 0; m_pictureBuffer[m_pictureBufferPtr++] = startcode >> 24; m_pictureBuffer[m_pictureBufferPtr++] = startcode >> 16; m_pictureBuffer[m_pictureBufferPtr++] = startcode >> 8; m_pictureBuffer[m_pictureBufferPtr++] = startcode >> 0; } m_StartCode = startcode; m_StartCodeOffset = 
m_pictureBufferPtr - 4; } m_StartCond = startcode; } bool cParserMPEG2Video::Parse_MPEG2Video(size_t len, uint32_t next_startcode, int sc_offset) { int frametype; uint8_t *buf = m_pictureBuffer + sc_offset; cBitstream bs(buf + 4, (len - 4) * 8); switch (m_StartCode) { case 0x000001e0 ... 0x000001ef: /* System start codes for video */ if (len >= 9) ParsePESHeader(buf, len); return true; case 0x00000100: /* Picture start code */ if (m_FrameDuration == 0 || m_curDTS == DVD_NOPTS_VALUE) return true; if (Parse_MPEG2Video_PicStart(&frametype, &bs)) return true; if (m_StreamPacket != NULL) { ERRORLOG("MPEG2 Video got a new picture start code with already openend steam packed"); } m_StreamPacket = new sStreamPacket; m_StreamPacket->id = m_streamID; m_StreamPacket->pts = m_curPTS; m_StreamPacket->dts = m_curDTS; m_StreamPacket->frametype = frametype; m_StreamPacket->duration = 0; m_FoundFrame = true; break; case 0x000001b3: /* Sequence start code */ if (Parse_MPEG2Video_SeqStart(&bs)) return true; break; case 0x000001b5: if(len < 5) return true; switch(buf[4] >> 4) { case 0x1: /* sequence extension */ // printf("Sequence extension, len = %d\n", len); if(len < 10) return true; // printf("Profile = %d\n", buf[4] & 0x7); // printf(" Level = %d\n", buf[5] >> 4); break; } break; case 0x00000101 ... 0x000001af: /* Slices */ if (next_startcode == 0x100 || next_startcode > 0x1af) { /* Last picture slice (because next not a slice) */ if(m_StreamPacket == NULL) { /* no packet, may've been discarded by sanity checks here */ return true; } m_StreamPacket->data = m_pictureBuffer; m_StreamPacket->size = m_pictureBufferPtr - 4; m_StreamPacket->duration = m_FrameDuration; Parse_ComputePTS(m_StreamPacket); m_StreamPacket = NULL; m_pictureBuffer = (uint8_t*)malloc(m_pictureBufferSize); /* If we know the frame duration, increase DTS accordingly */ m_curDTS += m_FrameDuration; /* PTS cannot be extrapolated (it's not linear) */ m_curPTS = DVD_NOPTS_VALUE; return true; } break; default: break; } return false; } bool cParserMPEG2Video::Parse_MPEG2Video_SeqStart(cBitstream *bs) { if (bs->length() < 61) return true; m_Width = bs->readBits(12); m_Height = bs->readBits(12); // figure out Display Aspect Ratio double DAR = 0; uint8_t aspect = bs->readBits(4); switch(aspect) { case 0: default: ERRORLOG("invalid / forbidden DAR in sequence header !"); break; case 1: DAR = 1.0; break; case 2: DAR = 4.0/3.0; break; case 3: DAR = 16.0/9.0; break; case 4: DAR = 2.21; break; } DEBUGLOG("MPEG2 DAR: %.2f", DAR); m_FrameDuration = mpeg2video_framedurations[bs->readBits(4)]; bs->skipBits(18); bs->skipBits(1); if (m_Width > 0) m_Streamer->SetReady(); m_vbvSize = bs->readBits(10) * 16 * 1024 / 8; m_demuxer->SetVideoInformation(0,0, m_Height, m_Width, DAR); return false; } bool cParserMPEG2Video::Parse_MPEG2Video_PicStart(int *frametype, cBitstream *bs) { if (bs->length() < 29) return true; bs->skipBits(10); /* temporal reference */ int pct = bs->readBits(3); if (pct < PKT_I_FRAME || pct > PKT_B_FRAME) return true; /* Illegal picture_coding_type */ *frametype = pct; int vbvDelay = bs->readBits(16); /* vbv_delay */ if (vbvDelay == 0xffff) m_vbvDelay = -1; else m_vbvDelay = Rescale(vbvDelay); return false; } void cParserMPEG2Video::Parse_ComputePTS(sStreamPacket *pkt) { bool validpts = pkt->pts != DVD_NOPTS_VALUE && m_PTSQueue.size() == 0; /* PTS known and no other packets in queue, deliver at once */ if (validpts && pkt->duration) { if (m_Width > 0) SendPacket(pkt); free(pkt->data); delete pkt; return; } if (validpts) return 
Parse_ComputeDuration(pkt); m_PTSQueue.push_back(pkt); while (!m_PTSQueue.empty()) { pkt = m_PTSQueue.front(); switch (pkt->frametype) { case PKT_B_FRAME: /* B-frames have same PTS as DTS, pass them on */ pkt->pts = pkt->dts; break; case PKT_I_FRAME: case PKT_P_FRAME: /* Presentation occures at DTS of next I or P frame, try to find it */ deque<sStreamPacket*>::iterator it; it = m_PTSQueue.begin()+1; while (1) { if (it >= m_PTSQueue.end()) return; /* not arrived yet, wait */ sStreamPacket* pkt2 = *it++; if (pkt2->frametype <= PKT_P_FRAME) { pkt->pts = pkt2->dts; break; } } break; } m_PTSQueue.pop_front(); if (pkt->duration == 0) { Parse_ComputeDuration(pkt); } else { if (m_Width > 0) SendPacket(pkt); free(pkt->data); delete pkt; } } } void cParserMPEG2Video::Parse_ComputeDuration(sStreamPacket *pkt) { m_DurationQueue.push_back(pkt); pkt = m_DurationQueue.front(); if (m_DurationQueue.size() <= 1) return; sStreamPacket *next = m_DurationQueue[1]; int64_t duration = next->dts - pkt->dts; m_DurationQueue.pop_front(); if (duration >= 10) { pkt->duration = duration; if (m_Width > 0) SendPacket(pkt); } free(pkt->data); }
gpl-2.0
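cParserMPEG2Video::Parse() above finds MPEG start codes by shifting each incoming byte into a rolling 32-bit accumulator and testing whether the top 24 bits equal 0x000001. The self-contained sketch below demonstrates just that scanning step; the buffer contents are hypothetical. Seeding the accumulator with 0xffffffff prevents a spurious match before four real bytes have been shifted in.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void scan_startcodes(const uint8_t *data, size_t size)
{
        /* 0xffffffff can never alias a start-code prefix */
        uint32_t sc = 0xffffffff;
        size_t i;

        for (i = 0; i < size; i++) {
                sc = (sc << 8) | data[i];
                if ((sc & 0xffffff00) == 0x00000100)
                        printf("start code %08x at offset %zu\n",
                               (unsigned)sc, i - 3);
        }
}

int main(void)
{
        /* hypothetical elementary-stream fragment */
        const uint8_t buf[] = { 0x00, 0x00, 0x01, 0xb3,   /* sequence start */
                                0x12, 0x34,
                                0x00, 0x00, 0x01, 0x00 }; /* picture start */

        scan_startcodes(buf, sizeof(buf));
        return 0;
}

Carrying the accumulator across calls, as the parser does with m_StartCond, lets a start code straddle two input buffers and still be detected.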
hroark13/android_kernel_lge_f6x
arch/arm/kernel/process.c
7
17927
/* * linux/arch/arm/kernel/process.c * * Copyright (C) 1996-2000 Russell King - Converted to ARM. * Original Copyright (C) 1995 Linus Torvalds * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <stdarg.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/elfcore.h> #include <linux/pm.h> #include <linux/tick.h> #include <linux/utsname.h> #include <linux/uaccess.h> #include <linux/random.h> #include <linux/hw_breakpoint.h> #include <linux/cpuidle.h> #include <linux/console.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/thread_notify.h> #include <asm/stacktrace.h> #include <asm/mach/time.h> #ifdef CONFIG_CC_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif static const char *processor_modes[] = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" }; static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; extern void setup_mm_for_reboot(void); static volatile int hlt_counter; #ifdef CONFIG_SMP void arch_trigger_all_cpu_backtrace(void) { smp_send_all_cpu_backtrace(); } #else void arch_trigger_all_cpu_backtrace(void) { dump_stack(); } #endif void disable_hlt(void) { hlt_counter++; } EXPORT_SYMBOL(disable_hlt); void enable_hlt(void) { hlt_counter--; } EXPORT_SYMBOL(enable_hlt); static int __init nohlt_setup(char *__unused) { hlt_counter = 1; return 1; } static int __init hlt_setup(char *__unused) { hlt_counter = 0; return 1; } __setup("nohlt", nohlt_setup); __setup("hlt", hlt_setup); extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); typedef void (*phys_reset_t)(unsigned long); #ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART void arm_machine_flush_console(void) { printk("\n"); pr_emerg("Restarting %s\n", linux_banner); if (console_trylock()) { console_unlock(); return; } mdelay(50); local_irq_disable(); if (!console_trylock()) pr_emerg("arm_restart: Console was locked! Busting\n"); else pr_emerg("arm_restart: Console was locked!\n"); console_unlock(); } #else void arm_machine_flush_console(void) { } #endif /* * A temporary stack to use for CPU reset. This is static so that we * don't clobber it with the identity mapping. When running with this * stack, any references to the current task *will not work* so you * should really do as little as possible before jumping to your reset * code. */ static u64 soft_restart_stack[16]; /* 2012-03-07 jinkyu.choi@lge.com * call pet_watchdog * for avoiding apps watchdog bark while rebooting sequence */ #ifdef CONFIG_MACH_LGE extern void pet_watchdog(void); #endif static void __soft_restart(void *addr) { phys_reset_t phys_reset; /* LGE_CHANGE */ #ifdef CONFIG_MACH_LGE pet_watchdog(); #endif /* Take out a flat memory mapping. 
*/ setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); /* Turn off caching */ cpu_proc_fin(); /* Push out any further dirty data, and ensure cache is empty */ flush_cache_all(); /* Push out the dirty data from external caches */ outer_disable(); /* Switch to the identity mapping. */ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); phys_reset((unsigned long)addr); /* Should never get here. */ BUG(); } void soft_restart(unsigned long addr) { u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); /* Disable interrupts first */ local_irq_disable(); local_fiq_disable(); /* Disable the L2 if we're the last man standing. */ if (num_online_cpus() == 1) outer_disable(); /* Change to the new stack and continue with the reset. */ call_with_stack(__soft_restart, (void *)addr, (void *)stack); /* Should never get here. */ BUG(); } static void null_restart(char mode, const char *cmd) { } /* * Function pointers to optional machine specific functions */ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); void (*arm_pm_restart)(char str, const char *cmd) = null_restart; EXPORT_SYMBOL_GPL(arm_pm_restart); static void do_nothing(void *unused) { } /* * cpu_idle_wait - Used to ensure that all the CPUs discard old value of * pm_idle and update to new pm_idle value. Required while changing pm_idle * handler on SMP systems. * * Caller must have changed pm_idle to the new value before the call. Old * pm_idle value will not be used by any CPU after the return of this function. */ void cpu_idle_wait(void) { smp_mb(); /* kick all the CPUs so that they exit out of pm_idle */ smp_call_function(do_nothing, NULL, 1); } EXPORT_SYMBOL_GPL(cpu_idle_wait); /* * This is our default idle handler. */ extern void arch_idle(void); void (*arm_pm_idle)(void) = arch_idle; static void default_idle(void) { if (arm_pm_idle) arm_pm_idle(); else cpu_do_idle(); local_irq_enable(); } void (*pm_idle)(void) = default_idle; EXPORT_SYMBOL(pm_idle); /* * The idle thread, has rather strange semantics for calling pm_idle, * but this is what x86 does and we need to do the same, so that * things like cpuidle get called in the same way. The only difference * is that we always respect 'hlt_counter' to prevent low power idle. */ void cpu_idle(void) { local_fiq_enable(); /* endless idle loop with no priority at all */ while (1) { idle_notifier_call_chain(IDLE_START); tick_nohz_idle_enter(); rcu_idle_enter(); while (!need_resched()) { /* * We need to disable interrupts here * to ensure we don't miss a wakeup call. */ local_irq_disable(); #ifdef CONFIG_PL310_ERRATA_769419 wmb(); #endif if (hlt_counter) { local_irq_enable(); cpu_relax(); } else if (!need_resched()) { stop_critical_timings(); if (cpuidle_idle_call()) pm_idle(); start_critical_timings(); /* * pm_idle functions must always * return with IRQs enabled. */ WARN_ON(irqs_disabled()); } else local_irq_enable(); } rcu_idle_exit(); tick_nohz_idle_exit(); idle_notifier_call_chain(IDLE_END); schedule_preempt_disabled(); #ifdef CONFIG_HOTPLUG_CPU if (cpu_is_offline(smp_processor_id())) cpu_die(); #endif } } static char reboot_mode = 'h'; int __init reboot_setup(char *str) { reboot_mode = str[0]; return 1; } __setup("reboot=", reboot_setup); void machine_shutdown(void) { preempt_disable(); #ifdef CONFIG_SMP /* * Disable preemption so we're guaranteed to * run to power off or reboot and prevent * the possibility of switching to another * thread that might wind up blocking on * one of the stopped CPUs. 
*/ preempt_disable(); smp_send_stop(); #endif } void machine_halt(void) { machine_shutdown(); while (1); } void machine_power_off(void) { machine_shutdown(); if (pm_power_off) pm_power_off(); } void machine_restart(char *cmd) { /* LGE_CHANGE */ #ifdef CONFIG_MACH_LGE preempt_disable(); #endif machine_shutdown(); /* Flush the console to make sure all the relevant messages make it * out to the console drivers */ arm_machine_flush_console(); arm_pm_restart(reboot_mode, cmd); /* LGE_CHANGE */ #ifdef CONFIG_MACH_LGE preempt_enable(); #endif /* Give a grace period for failure to restart of 1s */ mdelay(1000); /* Whoops - the platform was unable to reboot. Tell the user! */ printk("Reboot failed -- System halted\n"); while (1); } /* * dump a block of kernel memory from around the given address */ static void show_data(unsigned long addr, int nbytes, const char *name) { int i, j; int nlines; u32 *p; /* * don't attempt to dump non-kernel addresses or * values that are probably just small negative numbers */ if (addr < PAGE_OFFSET || addr > -256UL) return; printk("\n%s: %#lx:\n", name, addr); /* * round address down to a 32 bit boundary * and always dump a multiple of 32 bytes */ p = (u32 *)(addr & ~(sizeof(u32) - 1)); nbytes += (addr & (sizeof(u32) - 1)); nlines = (nbytes + 31) / 32; for (i = 0; i < nlines; i++) { /* * just display low 16 bits of address to keep * each line of the dump < 80 characters */ printk("%04lx ", (unsigned long)p & 0xffff); for (j = 0; j < 8; j++) { u32 data; if (probe_kernel_address(p, data)) { printk(" ********"); } else { printk(" %08x", data); } ++p; } printk("\n"); } } static void show_extra_register_data(struct pt_regs *regs, int nbytes) { mm_segment_t fs; fs = get_fs(); set_fs(KERNEL_DS); show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC"); show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR"); show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP"); show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP"); show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP"); show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0"); show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1"); show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2"); show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3"); show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4"); show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5"); show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6"); show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7"); show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8"); show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9"); show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10"); set_fs(fs); } void __show_regs(struct pt_regs *regs) { unsigned long flags; char buf[64]; #if defined(CONFIG_CPU_CP15_MMU) && defined(CONFIG_LGE_CRASH_HANDLER) /* LGE_CHANGE * save cpu and mmu registers to support simulation when debugging * taehung.kim@lge.com 2011-10-13 */ unsigned int c1,c2; set_crash_store_enable(); #endif printk("CPU: %d %s (%s %.*s)\n", raw_smp_processor_id(), print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("LR is at %s\n", regs->ARM_lr); #ifdef CONFIG_LGE_CRASH_HANDLER printk("pc : %08lx lr : %08lx psr : %08lx\n" #else /* Orignal */ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" #endif "sp : %08lx ip : %08lx fp : %08lx\n", regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); printk("r10: %08lx r9 : %08lx r8 : %08lx\n", regs->ARM_r10, regs->ARM_r9, regs->ARM_r8); 
printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4); printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0); /* LGE_CHANGE */ #ifdef CONFIG_LGE_CRASH_HANDLER set_crash_store_disable(); #endif flags = regs->ARM_cpsr; buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; buf[4] = '\0'; printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", buf, interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff", processor_modes[processor_mode(regs)], isa_modes[isa_mode(regs)], get_fs() == get_ds() ? "kernel" : "user"); #ifdef CONFIG_CPU_CP15 { unsigned int ctrl; buf[0] = '\0'; #ifdef CONFIG_CPU_CP15_MMU { unsigned int transbase, dac; asm("mrc p15, 0, %0, c2, c0\n\t" "mrc p15, 0, %1, c3, c0\n" : "=r" (transbase), "=r" (dac)); snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", transbase, dac); #ifdef CONFIG_LGE_CRASH_HANDLER /* LGE_CHANGE * save cpu and mmu registers to support simulation when debugging * taehung.kim@lge.com 2011-10-13 */ c1=transbase; c2=dac; #endif } #endif asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); printk("Control: %08x%s\n", ctrl, buf); #if defined(CONFIG_CPU_CP15_MMU) && defined(CONFIG_LGE_CRASH_HANDLER) /* LGE_CHANGE * save cpu and mmu registers to support simulation when debugging * taehung.kim@lge.com 2011-10-13 */ lge_save_ctx(regs,ctrl,c1,c2); #endif } #endif show_extra_register_data(regs, 128); } void show_regs(struct pt_regs * regs) { printk("\n"); printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm); __show_regs(regs); dump_stack(); } ATOMIC_NOTIFIER_HEAD(thread_notify_head); EXPORT_SYMBOL_GPL(thread_notify_head); /* * Free current thread data structures etc.. */ void exit_thread(void) { thread_notify(THREAD_NOTIFY_EXIT, current_thread_info()); } void flush_thread(void) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; flush_ptrace_hw_breakpoint(tsk); memset(thread->used_cp, 0, sizeof(thread->used_cp)); memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); thread_notify(THREAD_NOTIFY_FLUSH, thread); } void release_thread(struct task_struct *dead_task) { } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); *childregs = *regs; childregs->ARM_r0 = 0; childregs->ARM_sp = stack_start; memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); thread->cpu_context.sp = (unsigned long)childregs; thread->cpu_context.pc = (unsigned long)ret_from_fork; clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) thread->tp_value = regs->ARM_r3; thread_notify(THREAD_NOTIFY_COPY, thread); return 0; } /* * Fill in the task's elfregs structure for a core dump. */ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) { elf_core_copy_regs(elfregs, task_pt_regs(t)); return 1; } /* * fill in the fpe structure for a core dump... 
*/ int dump_fpu (struct pt_regs *regs, struct user_fp *fp) { struct thread_info *thread = current_thread_info(); int used_math = thread->used_cp[1] | thread->used_cp[2]; if (used_math) memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); return used_math != 0; } EXPORT_SYMBOL(dump_fpu); /* * Shuffle the argument into the correct register before calling the * thread function. r4 is the thread argument, r5 is the pointer to * the thread function, and r6 points to the exit function. */ extern void kernel_thread_helper(void); asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_helper, #function\n" "kernel_thread_helper:\n" #ifdef CONFIG_TRACE_IRQFLAGS " bl trace_hardirqs_on\n" #endif " msr cpsr_c, r7\n" " mov r0, r4\n" " mov lr, r6\n" " mov pc, r5\n" " .size kernel_thread_helper, . - kernel_thread_helper\n" " .popsection"); #ifdef CONFIG_ARM_UNWIND extern void kernel_thread_exit(long code); asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_exit, #function\n" "kernel_thread_exit:\n" " .fnstart\n" " .cantunwind\n" " bl do_exit\n" " nop\n" " .fnend\n" " .size kernel_thread_exit, . - kernel_thread_exit\n" " .popsection"); #else #define kernel_thread_exit do_exit #endif /* * Create a kernel thread. */ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.ARM_r4 = (unsigned long)arg; regs.ARM_r5 = (unsigned long)fn; regs.ARM_r6 = (unsigned long)kernel_thread_exit; regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; regs.ARM_pc = (unsigned long)kernel_thread_helper; regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT; return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } EXPORT_SYMBOL(kernel_thread); unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(p); do { int ret = unwind_frame(&frame); if (ret < 0) return 0; if (!in_sched_functions(frame.pc)) return frame.pc; } while (count ++ < 16); return 0; } unsigned long arch_randomize_brk(struct mm_struct *mm) { unsigned long range_end = mm->brk + 0x02000000; return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } #ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the * atomic helpers and the signal restart code. Insert it into the * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. */ static struct vm_area_struct gate_vma; static int __init gate_vma_init(void) { gate_vma.vm_start = 0xffff0000; gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; gate_vma.vm_page_prot = PAGE_READONLY_EXEC; gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; return 0; } arch_initcall(gate_vma_init); struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return &gate_vma; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); } int in_gate_area_no_mm(unsigned long addr) { return in_gate_area(NULL, addr); } const char *arch_vma_name(struct vm_area_struct *vma) { if (vma == &gate_vma) return "[vectors]"; else if (vma == get_user_timers_vma(NULL)) return "[timers]"; else return NULL; } #endif
gpl-2.0
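show_data() in the ARM process.c above rounds the requested address down to a 32-bit boundary, widens the byte count by the alignment slack, and prints eight words per line so each dump line stays under 80 characters. The userspace sketch below reproduces that rounding arithmetic without the kernel-only probe_kernel_address() safety check; the sample data is hypothetical.

#include <stdint.h>
#include <stdio.h>

static void hexdump_words(const void *addr, int nbytes)
{
        uintptr_t a = (uintptr_t)addr;
        /* round down to a 32-bit boundary, as show_data() does */
        const uint32_t *p =
                (const uint32_t *)(a & ~(uintptr_t)(sizeof(uint32_t) - 1));
        int i, j, nlines;

        /* widen the count by the alignment slack, dump 32 bytes per line */
        nbytes += a & (sizeof(uint32_t) - 1);
        nlines = (nbytes + 31) / 32;

        for (i = 0; i < nlines; i++) {
                /* low 16 bits of the address keep lines under 80 columns */
                printf("%04lx ", (unsigned long)((uintptr_t)p & 0xffff));
                for (j = 0; j < 8; j++)
                        printf(" %08x", *p++);
                printf("\n");
        }
}

int main(void)
{
        uint32_t sample[16] = { 0xdeadbeef, 0xcafebabe }; /* hypothetical */

        /* deliberately misaligned start to exercise the rounding */
        hexdump_words((const char *)sample + 2, 30);
        return 0;
}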
muzaffar101/rtLinux2.4.35
drivers/video/matrox/matroxfb_g450.c
7
15886
/* * * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450. * * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Portions Copyright (c) 2001 Matrox Graphics Inc. * * Version: 1.64 2002/06/02 * * See matroxfb_base.c for contributors. * */ #include "matroxfb_base.h" #include "matroxfb_misc.h" #include "matroxfb_DAC1064.h" #include "g450_pll.h" #include <linux/matroxfb.h> #include <asm/uaccess.h> #include <asm/div64.h> /* Definition of the various controls */ struct mctl { struct matroxfb_queryctrl desc; size_t control; }; #define BLMIN 0xF3 #define WLMAX 0x3FF static const struct mctl g450_controls[] = { { { MATROXFB_CID_BRIGHTNESS, "brightness", 0, WLMAX-BLMIN, 1, 370-BLMIN, MATROXFB_CTRL_TYPE_INTEGER, 0, 0, "picture" }, offsetof(struct matrox_fb_info, altout.tvo_params.brightness) }, { { MATROXFB_CID_CONTRAST, "contrast", 0, 1023, 1, 127, MATROXFB_CTRL_TYPE_INTEGER, 0, 0, "picture" }, offsetof(struct matrox_fb_info, altout.tvo_params.contrast) }, { { MATROXFB_CID_SATURATION, "saturation", 0, 255, 1, 165, MATROXFB_CTRL_TYPE_INTEGER, 0, 0, "picture" }, offsetof(struct matrox_fb_info, altout.tvo_params.saturation) }, { { MATROXFB_CID_HUE, "hue", 0, 255, 1, 0, MATROXFB_CTRL_TYPE_INTEGER, 0, 0, "picture" }, offsetof(struct matrox_fb_info, altout.tvo_params.hue) }, { { MATROXFB_CID_TESTOUT, "test output", 0, 1, 1, 0, MATROXFB_CTRL_TYPE_BOOLEAN, 0, 0, "picture" }, offsetof(struct matrox_fb_info, altout.tvo_params.testout) }, }; #define G450CTRLS (sizeof(g450_controls)/sizeof(g450_controls[0])) /* Return: positive number: id found -EINVAL: id not found, return failure -ENOENT: id not found, create fake disabled control */ static int get_ctrl_id(__u32 v4l2_id) { int i; for (i = 0; i < G450CTRLS; i++) { if (v4l2_id < g450_controls[i].desc.id) { if (g450_controls[i].desc.id == 0x08000000) { return -EINVAL; } return -ENOENT; } if (v4l2_id == g450_controls[i].desc.id) { return i; } } return -EINVAL; } static inline int* get_ctrl_ptr(WPMINFO unsigned int idx) { return (int*)((char*)MINFO + g450_controls[idx].control); } static void tvo_fill_defaults(WPMINFO2) { unsigned int i; for (i = 0; i < G450CTRLS; i++) { *get_ctrl_ptr(PMINFO i) = g450_controls[i].desc.default_value; } } static int cve2_get_reg(WPMINFO int reg) { unsigned long flags; int val; matroxfb_DAC_lock_irqsave(flags); matroxfb_DAC_out(PMINFO 0x87, reg); val = matroxfb_DAC_in(PMINFO 0x88); matroxfb_DAC_unlock_irqrestore(flags); return val; } static void cve2_set_reg(WPMINFO int reg, int val) { unsigned long flags; matroxfb_DAC_lock_irqsave(flags); matroxfb_DAC_out(PMINFO 0x87, reg); matroxfb_DAC_out(PMINFO 0x88, val); matroxfb_DAC_unlock_irqrestore(flags); } static void cve2_set_reg10(WPMINFO int reg, int val) { unsigned long flags; matroxfb_DAC_lock_irqsave(flags); matroxfb_DAC_out(PMINFO 0x87, reg); matroxfb_DAC_out(PMINFO 0x88, val >> 2); matroxfb_DAC_out(PMINFO 0x87, reg + 1); matroxfb_DAC_out(PMINFO 0x88, val & 3); matroxfb_DAC_unlock_irqrestore(flags); } static void g450_compute_bwlevel(CPMINFO int *bl, int *wl) { const int b = ACCESS_FBINFO(altout.tvo_params.brightness) + BLMIN; const int c = ACCESS_FBINFO(altout.tvo_params.contrast); *bl = max(b - c, BLMIN); *wl = min(b + c, WLMAX); } static int g450_query_ctrl(void* md, struct matroxfb_queryctrl *p) { int i; i = get_ctrl_id(p->id); if (i >= 0) { *p = g450_controls[i].desc; return 0; } if (i == -ENOENT) { static const struct matroxfb_queryctrl disctrl = { 0, "", 0, 0, 0, 0, 0, 1, 1, "Disabled" }; i = p->id; *p = disctrl; p->id = i; sprintf(p->name, 
"Ctrl #%08X", i); return 0; } return -EINVAL; } static int g450_set_ctrl(void* md, struct matroxfb_control *p) { int i; MINFO_FROM(md); i = get_ctrl_id(p->id); if (i < 0) return -EINVAL; /* * Check if changed. */ if (p->value == *get_ctrl_ptr(PMINFO i)) return 0; /* * Check limits. */ if (p->value > g450_controls[i].desc.maximum) return -EINVAL; if (p->value < g450_controls[i].desc.minimum) return -EINVAL; /* * Store new value. */ *get_ctrl_ptr(PMINFO i) = p->value; switch (p->id) { case MATROXFB_CID_BRIGHTNESS: case MATROXFB_CID_CONTRAST: { int blacklevel, whitelevel; g450_compute_bwlevel(PMINFO &blacklevel, &whitelevel); cve2_set_reg10(PMINFO 0x0e, blacklevel); cve2_set_reg10(PMINFO 0x1e, whitelevel); } break; case MATROXFB_CID_SATURATION: cve2_set_reg(PMINFO 0x20, p->value); cve2_set_reg(PMINFO 0x22, p->value); break; case MATROXFB_CID_HUE: cve2_set_reg(PMINFO 0x25, p->value); break; case MATROXFB_CID_TESTOUT: { unsigned char val = cve2_get_reg (PMINFO 0x05); if (p->value) val |= 0x02; else val &= ~0x02; cve2_set_reg(PMINFO 0x05, val); } break; } return 0; } static int g450_get_ctrl(void* md, struct matroxfb_control *p) { int i; MINFO_FROM(md); i = get_ctrl_id(p->id); if (i < 0) return -EINVAL; p->value = *get_ctrl_ptr(PMINFO i); return 0; } struct output_desc { unsigned int h_vis; unsigned int h_f_porch; unsigned int h_sync; unsigned int h_b_porch; unsigned long long int chromasc; unsigned int burst; unsigned int v_total; }; static void computeRegs(WPMINFO struct mavenregs* r, struct my_timming* mt, const struct output_desc* outd) { u_int32_t chromasc; u_int32_t hlen; u_int32_t hsl; u_int32_t hbp; u_int32_t hfp; u_int32_t hvis; unsigned int pixclock; unsigned long long piic; int mnp; int over; r->regs[0x80] = 0x03; /* | 0x40 for SCART */ hvis = ((mt->HDisplay << 1) + 3) & ~3; if (hvis >= 2048) { hvis = 2044; } piic = 1000000000ULL * hvis; do_div(piic, outd->h_vis); dprintk(KERN_DEBUG "Want %u kHz pixclock\n", (unsigned int)piic); mnp = matroxfb_g450_setclk(PMINFO piic, M_VIDEO_PLL); mt->mnp = mnp; mt->pixclock = g450_mnp2f(PMINFO mnp); dprintk(KERN_DEBUG "MNP=%08X\n", mnp); pixclock = 1000000000U / mt->pixclock; dprintk(KERN_DEBUG "Got %u ps pixclock\n", pixclock); piic = outd->chromasc; do_div(piic, mt->pixclock); chromasc = piic; dprintk(KERN_DEBUG "Chroma is %08X\n", chromasc); r->regs[0] = piic >> 24; r->regs[1] = piic >> 16; r->regs[2] = piic >> 8; r->regs[3] = piic >> 0; hbp = (((outd->h_b_porch + pixclock) / pixclock)) & ~1; hfp = (((outd->h_f_porch + pixclock) / pixclock)) & ~1; hsl = (((outd->h_sync + pixclock) / pixclock)) & ~1; hlen = hvis + hfp + hsl + hbp; over = hlen & 0x0F; dprintk(KERN_DEBUG "WL: vis=%u, hf=%u, hs=%u, hb=%u, total=%u\n", hvis, hfp, hsl, hbp, hlen); if (over) { hfp -= over; hlen -= over; if (over <= 2) { } else if (over < 10) { hfp += 4; hlen += 4; } else { hfp += 16; hlen += 16; } } /* maybe cve2 has requirement 800 < hlen < 1184 */ r->regs[0x08] = hsl; r->regs[0x09] = (outd->burst + pixclock - 1) / pixclock; /* burst length */ r->regs[0x0A] = hbp; r->regs[0x2C] = hfp; r->regs[0x31] = hvis / 8; r->regs[0x32] = hvis & 7; dprintk(KERN_DEBUG "PG: vis=%04X, hf=%02X, hs=%02X, hb=%02X, total=%04X\n", hvis, hfp, hsl, hbp, hlen); r->regs[0x84] = 1; /* x sync point */ r->regs[0x85] = 0; hvis = hvis >> 1; hlen = hlen >> 1; dprintk(KERN_DEBUG "hlen=%u hvis=%u\n", hlen, hvis); mt->interlaced = 1; mt->HDisplay = hvis & ~7; mt->HSyncStart = mt->HDisplay + 8; mt->HSyncEnd = (hlen & ~7) - 8; mt->HTotal = hlen; { int upper; unsigned int vtotal; unsigned int vsyncend; 
unsigned int vdisplay; vtotal = mt->VTotal; vsyncend = mt->VSyncEnd; vdisplay = mt->VDisplay; if (vtotal < outd->v_total) { unsigned int yovr = outd->v_total - vtotal; vsyncend += yovr >> 1; } else if (vtotal > outd->v_total) { vdisplay = outd->v_total - 4; vsyncend = outd->v_total; } upper = (outd->v_total - vsyncend) >> 1; /* in field lines */ r->regs[0x17] = outd->v_total / 4; r->regs[0x18] = outd->v_total & 3; r->regs[0x33] = upper - 1; /* upper blanking */ r->regs[0x82] = upper; /* y sync point */ r->regs[0x83] = upper >> 8; mt->VDisplay = vdisplay; mt->VSyncStart = outd->v_total - 2; mt->VSyncEnd = outd->v_total; mt->VTotal = outd->v_total; } } static void cve2_init_TVdata(int norm, struct mavenregs* data, const struct output_desc** outd) { static const struct output_desc paloutd = { .h_vis = 52148148, // ps .h_f_porch = 1407407, // ps .h_sync = 4666667, // ps .h_b_porch = 5777778, // ps .chromasc = 19042247534182ULL, // 4433618.750 Hz .burst = 2518518, // ps .v_total = 625, }; static const struct output_desc ntscoutd = { .h_vis = 52888889, // ps .h_f_porch = 1333333, // ps .h_sync = 4666667, // ps .h_b_porch = 4666667, // ps .chromasc = 15374030659475ULL, // 3579545.454 Hz .burst = 2418418, // ps .v_total = 525, // lines }; static const struct mavenregs palregs = { { 0x2A, 0x09, 0x8A, 0xCB, /* 00: chroma subcarrier */ 0x00, 0x00, /* test */ 0xF9, /* modified by code (F9 written...) */ 0x00, /* ? not written */ 0x7E, /* 08 */ 0x44, /* 09 */ 0x9C, /* 0A */ 0x2E, /* 0B */ 0x21, /* 0C */ 0x00, /* ? not written */ // 0x3F, 0x03, /* 0E-0F */ 0x3C, 0x03, 0x3C, 0x03, /* 10-11 */ 0x1A, /* 12 */ 0x2A, /* 13 */ 0x1C, 0x3D, 0x14, /* 14-16 */ 0x9C, 0x01, /* 17-18 */ 0x00, /* 19 */ 0xFE, /* 1A */ 0x7E, /* 1B */ 0x60, /* 1C */ 0x05, /* 1D */ // 0x89, 0x03, /* 1E-1F */ 0xAD, 0x03, // 0x72, /* 20 */ 0xA5, 0x07, /* 21 */ // 0x72, /* 22 */ 0xA5, 0x00, /* 23 */ 0x00, /* 24 */ 0x00, /* 25 */ 0x08, /* 26 */ 0x04, /* 27 */ 0x00, /* 28 */ 0x1A, /* 29 */ 0x55, 0x01, /* 2A-2B */ 0x26, /* 2C */ 0x07, 0x7E, /* 2D-2E */ 0x02, 0x54, /* 2F-30 */ 0xB0, 0x00, /* 31-32 */ 0x14, /* 33 */ 0x49, /* 34 */ 0x00, /* 35 written multiple times */ 0x00, /* 36 not written */ 0xA3, /* 37 */ 0xC8, /* 38 */ 0x22, /* 39 */ 0x02, /* 3A */ 0x22, /* 3B */ 0x3F, 0x03, /* 3C-3D */ 0x00, /* 3E written multiple times */ 0x00, /* 3F not written */ } }; static struct mavenregs ntscregs = { { 0x21, 0xF0, 0x7C, 0x1F, /* 00: chroma subcarrier */ 0x00, 0x00, /* test */ 0xF9, /* modified by code (F9 written...) */ 0x00, /* ? not written */ 0x7E, /* 08 */ 0x43, /* 09 */ 0x7E, /* 0A */ 0x3D, /* 0B */ 0x00, /* 0C */ 0x00, /* ? 
not written */ 0x41, 0x00, /* 0E-0F */ 0x3C, 0x00, /* 10-11 */ 0x17, /* 12 */ 0x21, /* 13 */ 0x1B, 0x1B, 0x24, /* 14-16 */ 0x83, 0x01, /* 17-18 */ 0x00, /* 19 */ 0x0F, /* 1A */ 0x0F, /* 1B */ 0x60, /* 1C */ 0x05, /* 1D */ //0x89, 0x02, /* 1E-1F */ 0xC0, 0x02, /* 1E-1F */ //0x5F, /* 20 */ 0x9C, /* 20 */ 0x04, /* 21 */ //0x5F, /* 22 */ 0x9C, /* 22 */ 0x01, /* 23 */ 0x02, /* 24 */ 0x00, /* 25 */ 0x0A, /* 26 */ 0x05, /* 27 */ 0x00, /* 28 */ 0x10, /* 29 */ 0xFF, 0x03, /* 2A-2B */ 0x24, /* 2C */ 0x0F, 0x78, /* 2D-2E */ 0x00, 0x00, /* 2F-30 */ 0xB2, 0x04, /* 31-32 */ 0x14, /* 33 */ 0x02, /* 34 */ 0x00, /* 35 written multiple times */ 0x00, /* 36 not written */ 0xA3, /* 37 */ 0xC8, /* 38 */ 0x15, /* 39 */ 0x05, /* 3A */ 0x3B, /* 3B */ 0x3C, 0x00, /* 3C-3D */ 0x00, /* 3E written multiple times */ 0x00, /* never written */ } }; if (norm == MATROXFB_OUTPUT_MODE_PAL) { *data = palregs; *outd = &paloutd; } else { *data = ntscregs; *outd = &ntscoutd; } return; } #define LR(x) cve2_set_reg(PMINFO (x), m->regs[(x)]) static void cve2_init_TV(WPMINFO const struct mavenregs* m) { int i; LR(0x80); LR(0x82); LR(0x83); LR(0x84); LR(0x85); cve2_set_reg(PMINFO 0x3E, 0x01); for (i = 0; i < 0x3E; i++) { LR(i); } cve2_set_reg(PMINFO 0x3E, 0x00); } static int matroxfb_g450_compute(void* md, struct my_timming* mt) { MINFO_FROM(md); dprintk(KERN_DEBUG "Computing, mode=%u\n", ACCESS_FBINFO(outputs[1]).mode); if (mt->crtc == MATROXFB_SRC_CRTC2 && ACCESS_FBINFO(outputs[1]).mode != MATROXFB_OUTPUT_MODE_MONITOR) { const struct output_desc* outd; cve2_init_TVdata(ACCESS_FBINFO(outputs[1]).mode, &ACCESS_FBINFO(hw).maven, &outd); { int blacklevel, whitelevel; g450_compute_bwlevel(PMINFO &blacklevel, &whitelevel); ACCESS_FBINFO(hw).maven.regs[0x0E] = blacklevel >> 2; ACCESS_FBINFO(hw).maven.regs[0x0F] = blacklevel & 3; ACCESS_FBINFO(hw).maven.regs[0x1E] = whitelevel >> 2; ACCESS_FBINFO(hw).maven.regs[0x1F] = whitelevel & 3; ACCESS_FBINFO(hw).maven.regs[0x20] = ACCESS_FBINFO(hw).maven.regs[0x22] = ACCESS_FBINFO(altout.tvo_params.saturation); ACCESS_FBINFO(hw).maven.regs[0x25] = ACCESS_FBINFO(altout.tvo_params.hue); if (ACCESS_FBINFO(altout.tvo_params.testout)) { ACCESS_FBINFO(hw).maven.regs[0x05] |= 0x02; } } computeRegs(PMINFO &ACCESS_FBINFO(hw).maven, mt, outd); } else if (mt->mnp < 0) { /* We must program clocks before CRTC2, otherwise interlaced mode startup may fail */ mt->mnp = matroxfb_g450_setclk(PMINFO mt->pixclock, (mt->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL); mt->pixclock = g450_mnp2f(PMINFO mt->mnp); } dprintk(KERN_DEBUG "Pixclock = %u\n", mt->pixclock); return 0; } static int matroxfb_g450_program(void* md) { MINFO_FROM(md); if (ACCESS_FBINFO(outputs[1]).mode != MATROXFB_OUTPUT_MODE_MONITOR) { cve2_init_TV(PMINFO &ACCESS_FBINFO(hw).maven); } return 0; } static int matroxfb_g450_verify_mode(void* md, u_int32_t arg) { // MINFO_FROM(md); switch (arg) { case MATROXFB_OUTPUT_MODE_PAL: case MATROXFB_OUTPUT_MODE_NTSC: case MATROXFB_OUTPUT_MODE_MONITOR: return 0; } return -EINVAL; } static int g450_dvi_compute(void* md, struct my_timming* mt) { MINFO_FROM(md); if (mt->mnp < 0) { mt->mnp = matroxfb_g450_setclk(PMINFO mt->pixclock, (mt->crtc == MATROXFB_SRC_CRTC1) ? 
M_PIXEL_PLL_C : M_VIDEO_PLL); mt->pixclock = g450_mnp2f(PMINFO mt->mnp); } return 0; } static struct matrox_altout matroxfb_g450_altout = { .owner = THIS_MODULE, .name = "Secondary output", .compute = matroxfb_g450_compute, .program = matroxfb_g450_program, .verifymode = matroxfb_g450_verify_mode, .getqueryctrl = g450_query_ctrl, .getctrl = g450_get_ctrl, .setctrl = g450_set_ctrl, }; static struct matrox_altout matroxfb_g450_dvi = { .owner = THIS_MODULE, .name = "DVI output", .compute = g450_dvi_compute, }; void matroxfb_g450_connect(WPMINFO2) { if (ACCESS_FBINFO(devflags.g450dac)) { down_write(&ACCESS_FBINFO(altout.lock)); tvo_fill_defaults(PMINFO2); ACCESS_FBINFO(outputs[1]).src = MATROXFB_SRC_CRTC1; ACCESS_FBINFO(outputs[1]).data = MINFO; ACCESS_FBINFO(outputs[1]).output = &matroxfb_g450_altout; ACCESS_FBINFO(outputs[1]).mode = MATROXFB_OUTPUT_MODE_MONITOR; ACCESS_FBINFO(outputs[2]).src = MATROXFB_SRC_CRTC1; ACCESS_FBINFO(outputs[2]).data = MINFO; ACCESS_FBINFO(outputs[2]).output = &matroxfb_g450_dvi; ACCESS_FBINFO(outputs[2]).mode = MATROXFB_OUTPUT_MODE_MONITOR; up_write(&ACCESS_FBINFO(altout.lock)); } } void matroxfb_g450_shutdown(WPMINFO2) { if (ACCESS_FBINFO(devflags.g450dac)) { down_write(&ACCESS_FBINFO(altout.lock)); ACCESS_FBINFO(outputs[1]).src = MATROXFB_SRC_NONE; ACCESS_FBINFO(outputs[1]).output = NULL; ACCESS_FBINFO(outputs[1]).data = NULL; ACCESS_FBINFO(outputs[1]).mode = MATROXFB_OUTPUT_MODE_MONITOR; ACCESS_FBINFO(outputs[2]).src = MATROXFB_SRC_NONE; ACCESS_FBINFO(outputs[2]).output = NULL; ACCESS_FBINFO(outputs[2]).data = NULL; ACCESS_FBINFO(outputs[2]).mode = MATROXFB_OUTPUT_MODE_MONITOR; up_write(&ACCESS_FBINFO(altout.lock)); } } EXPORT_SYMBOL(matroxfb_g450_connect); EXPORT_SYMBOL(matroxfb_g450_shutdown); MODULE_AUTHOR("(c) 2000-2002 Petr Vandrovec <vandrove@vc.cvut.cz>"); MODULE_DESCRIPTION("Matrox G450/G550 output driver"); MODULE_LICENSE("GPL");
gpl-2.0
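g450_compute_bwlevel() above derives the TV encoder's black and white levels from the brightness and contrast controls: black is brightness minus contrast floored at BLMIN, white is brightness plus contrast capped at WLMAX. A standalone sketch of the same clamping, exercised with the driver's default control values:

#include <stdio.h>

#define BLMIN 0xF3
#define WLMAX 0x3FF

static void compute_bwlevel(int brightness, int contrast, int *bl, int *wl)
{
        /* the control value is stored relative to BLMIN, as in the driver */
        int b = brightness + BLMIN;

        *bl = b - contrast;
        if (*bl < BLMIN)
                *bl = BLMIN;    /* floor the black level */
        *wl = b + contrast;
        if (*wl > WLMAX)
                *wl = WLMAX;    /* cap the white level */
}

int main(void)
{
        int bl, wl;

        compute_bwlevel(370 - BLMIN, 127, &bl, &wl); /* driver defaults */
        printf("black=%d white=%d\n", bl, wl);
        return 0;
}

With the defaults from g450_controls[] (brightness 370-BLMIN, contrast 127) the black level lands exactly on the BLMIN floor, which is consistent with the ranges declared in that control table.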
BPI-SINOVOIP/BPI-Mainline-kernel
linux-5.4/drivers/usb/serial/option.c
7
124261
// SPDX-License-Identifier: GPL-2.0 /* USB Driver for GSM modems Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de> Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> History: see the git log. Work sponsored by: Sigos GmbH, Germany <info@sigos.de> This driver exists because the "normal" serial driver doesn't work too well with GSM modems. Issues: - data loss -- one single Receive URB is not nearly enough - nonstandard flow (Option devices) control - controlling the baud rate doesn't make sense This driver is named "option" because the most common device it's used for is a PC-Card (with an internal OHCI-USB interface, behind which the GSM interface sits), made by Option Inc. Some of the "one port" devices actually exhibit multiple USB instances on the USB bus. This is not a bug, these ports are used for different device features. */ #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "usb-wwan.h" /* Function prototypes */ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id); static int option_attach(struct usb_serial *serial); static void option_release(struct usb_serial *serial); static void option_instat_callback(struct urb *urb); /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 #define OPTION_PRODUCT_COLT 0x5000 #define OPTION_PRODUCT_RICOLA 0x6000 #define OPTION_PRODUCT_RICOLA_LIGHT 0x6100 #define OPTION_PRODUCT_RICOLA_QUAD 0x6200 #define OPTION_PRODUCT_RICOLA_QUAD_LIGHT 0x6300 #define OPTION_PRODUCT_RICOLA_NDIS 0x6050 #define OPTION_PRODUCT_RICOLA_NDIS_LIGHT 0x6150 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD 0x6250 #define OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT 0x6350 #define OPTION_PRODUCT_COBRA 0x6500 #define OPTION_PRODUCT_COBRA_BUS 0x6501 #define OPTION_PRODUCT_VIPER 0x6600 #define OPTION_PRODUCT_VIPER_BUS 0x6601 #define OPTION_PRODUCT_GT_MAX_READY 0x6701 #define OPTION_PRODUCT_FUJI_MODEM_LIGHT 0x6721 #define OPTION_PRODUCT_FUJI_MODEM_GT 0x6741 #define OPTION_PRODUCT_FUJI_MODEM_EX 0x6761 #define OPTION_PRODUCT_KOI_MODEM 0x6800 #define OPTION_PRODUCT_SCORPION_MODEM 0x6901 #define OPTION_PRODUCT_ETNA_MODEM 0x7001 #define OPTION_PRODUCT_ETNA_MODEM_LITE 0x7021 #define OPTION_PRODUCT_ETNA_MODEM_GT 0x7041 #define OPTION_PRODUCT_ETNA_MODEM_EX 0x7061 #define OPTION_PRODUCT_ETNA_KOI_MODEM 0x7100 #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 #define HUAWEI_PRODUCT_E173 0x140C #define HUAWEI_PRODUCT_E1750 0x1406 #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_K4605 0x14C6 #define HUAWEI_PRODUCT_E173S6 0x1C07 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 #define QUANTA_PRODUCT_Q111 0xEA03 #define QUANTA_PRODUCT_GLX 0xEA04 #define QUANTA_PRODUCT_GKE 0xEA05 #define QUANTA_PRODUCT_GLE 0xEA06 #define NOVATELWIRELESS_VENDOR_ID 0x1410 /* YISO PRODUCTS */ #define YISO_VENDOR_ID 0x0EAB #define YISO_PRODUCT_U893 0xC893 /* * NOVATEL WIRELESS PRODUCTS * * Note from Novatel Wireless: * If your Novatel modem does not work on linux, don't * change the option module, but check our website. 
If * that does not help, contact ddeschepper@nvtl.com */ /* MERLIN EVDO PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_V640 0x1100 #define NOVATELWIRELESS_PRODUCT_V620 0x1110 #define NOVATELWIRELESS_PRODUCT_V740 0x1120 #define NOVATELWIRELESS_PRODUCT_V720 0x1130 /* MERLIN HSDPA/HSPA PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_U730 0x1400 #define NOVATELWIRELESS_PRODUCT_U740 0x1410 #define NOVATELWIRELESS_PRODUCT_U870 0x1420 #define NOVATELWIRELESS_PRODUCT_XU870 0x1430 #define NOVATELWIRELESS_PRODUCT_X950D 0x1450 /* EXPEDITE PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 #define NOVATELWIRELESS_PRODUCT_E725 0x2120 #define NOVATELWIRELESS_PRODUCT_ES620 0x2130 #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 /* OVATION PRODUCTS */ #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 /* * Note from Novatel Wireless: * All PID in the 5xxx range are currently reserved for * auto-install CDROMs, and should not be added to this * module. * * #define NOVATELWIRELESS_PRODUCT_U727 0x5010 * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 */ #define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 #define NOVATELWIRELESS_PRODUCT_MC780 0x6010 #define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000 #define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001 #define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006 #define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007 #define NOVATELWIRELESS_PRODUCT_MC996D 0x7030 #define NOVATELWIRELESS_PRODUCT_MF3470 0x7041 #define NOVATELWIRELESS_PRODUCT_MC547 0x7042 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000 #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 #define NOVATELWIRELESS_PRODUCT_E362 0x9010 #define NOVATELWIRELESS_PRODUCT_E371 0x9011 #define NOVATELWIRELESS_PRODUCT_U620L 0x9022 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 #define AMOI_PRODUCT_H01A 0x7002 #define AMOI_PRODUCT_H02 0x0802 #define AMOI_PRODUCT_SKYPEPHONE_S2 0x0407 #define DELL_VENDOR_ID 0x413C /* Dell modems */ #define DELL_PRODUCT_5700_MINICARD 0x8114 #define DELL_PRODUCT_5500_MINICARD 0x8115 #define DELL_PRODUCT_5505_MINICARD 0x8116 #define DELL_PRODUCT_5700_EXPRESSCARD 0x8117 #define DELL_PRODUCT_5510_EXPRESSCARD 0x8118 #define DELL_PRODUCT_5700_MINICARD_SPRINT 0x8128 #define DELL_PRODUCT_5700_MINICARD_TELUS 0x8129 #define DELL_PRODUCT_5720_MINICARD_VZW 0x8133 #define DELL_PRODUCT_5720_MINICARD_SPRINT 0x8134 #define DELL_PRODUCT_5720_MINICARD_TELUS 0x8135 #define DELL_PRODUCT_5520_MINICARD_CINGULAR 0x8136 #define DELL_PRODUCT_5520_MINICARD_GENERIC_L 0x8137 #define DELL_PRODUCT_5520_MINICARD_GENERIC_I 0x8138 #define DELL_PRODUCT_5730_MINICARD_SPRINT 0x8180 #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ #define 
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */

#define DELL_PRODUCT_5821E 0x81d7

#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a

#define ANYDATA_VENDOR_ID 0x16d5
#define ANYDATA_PRODUCT_ADU_620UW 0x6202
#define ANYDATA_PRODUCT_ADU_E100A 0x6501
#define ANYDATA_PRODUCT_ADU_500A 0x6502

#define AXESSTEL_VENDOR_ID 0x1726
#define AXESSTEL_PRODUCT_MV110H 0x1000

#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
#define BANDRICH_PRODUCT_1004 0x1004
#define BANDRICH_PRODUCT_1005 0x1005
#define BANDRICH_PRODUCT_1006 0x1006
#define BANDRICH_PRODUCT_1007 0x1007
#define BANDRICH_PRODUCT_1008 0x1008
#define BANDRICH_PRODUCT_1009 0x1009
#define BANDRICH_PRODUCT_100A 0x100a
#define BANDRICH_PRODUCT_100B 0x100b
#define BANDRICH_PRODUCT_100C 0x100c
#define BANDRICH_PRODUCT_100D 0x100d
#define BANDRICH_PRODUCT_100E 0x100e
#define BANDRICH_PRODUCT_100F 0x100f
#define BANDRICH_PRODUCT_1010 0x1010
#define BANDRICH_PRODUCT_1011 0x1011
#define BANDRICH_PRODUCT_1012 0x1012

#define QUALCOMM_VENDOR_ID 0x05C6
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
/* These u-blox products use Qualcomm's vendor ID */
#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625

#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512

#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
#define CMOTECH_PRODUCT_CMU_300 0x6002
#define CMOTECH_PRODUCT_6003 0x6003
#define CMOTECH_PRODUCT_6004 0x6004
#define CMOTECH_PRODUCT_6005 0x6005
#define CMOTECH_PRODUCT_CGU_628A 0x6006
#define CMOTECH_PRODUCT_CHE_628S 0x6007
#define CMOTECH_PRODUCT_CMU_301 0x6008
#define CMOTECH_PRODUCT_CHU_628 0x6280
#define CMOTECH_PRODUCT_CHU_628S 0x6281
#define CMOTECH_PRODUCT_CDU_680 0x6803
#define CMOTECH_PRODUCT_CDU_685A 0x6804
#define CMOTECH_PRODUCT_CHU_720S 0x7001
#define CMOTECH_PRODUCT_7002 0x7002
#define CMOTECH_PRODUCT_CHU_629K 0x7003
#define CMOTECH_PRODUCT_7004 0x7004
#define CMOTECH_PRODUCT_7005 0x7005
#define CMOTECH_PRODUCT_CGU_629 0x7006
#define CMOTECH_PRODUCT_CHU_629S 0x700a
#define CMOTECH_PRODUCT_CHU_720I 0x7211
#define CMOTECH_PRODUCT_7212 0x7212
#define CMOTECH_PRODUCT_7213 0x7213
#define CMOTECH_PRODUCT_7251 0x7251
#define CMOTECH_PRODUCT_7252 0x7252
#define CMOTECH_PRODUCT_7253 0x7253

#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
#define TELIT_PRODUCT_UC864G 0x1004
#define TELIT_PRODUCT_CC864_DUAL 0x1005
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100
#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
#define TELIT_PRODUCT_LE920A4_1207 0x1207
#define TELIT_PRODUCT_LE920A4_1208 0x1208
#define TELIT_PRODUCT_LE920A4_1211 0x1211
#define TELIT_PRODUCT_LE920A4_1212 0x1212
#define TELIT_PRODUCT_LE920A4_1213 0x1213
#define TELIT_PRODUCT_LE920A4_1214 0x1214

/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_ZM8620_X 0x0396
#define ZTE_PRODUCT_ME3620_MBIM 0x0426
#define ZTE_PRODUCT_ME3620_X 0x1432
#define ZTE_PRODUCT_ME3620_L 0x1433
#define ZTE_PRODUCT_AC2726 0xfff1
#define ZTE_PRODUCT_MG880 0xfffd
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710T 0xffff
#define ZTE_PRODUCT_MC2718 0xffe8
#define ZTE_PRODUCT_AD3812 0xffeb
#define ZTE_PRODUCT_MC2716 0xffed

#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068

#define DLINK_VENDOR_ID 0x1186
#define DLINK_PRODUCT_DWM_652 0x3e04
#define DLINK_PRODUCT_DWM_652_U5 0xce16
#define DLINK_PRODUCT_DWM_652_U5A 0xce1e

#define QISDA_VENDOR_ID 0x1da5
#define QISDA_PRODUCT_H21_4512 0x4512
#define QISDA_PRODUCT_H21_4523 0x4523
#define QISDA_PRODUCT_H20_4515 0x4515
#define QISDA_PRODUCT_H20_4518 0x4518
#define QISDA_PRODUCT_H20_4519 0x4519

/* TLAYTECH PRODUCTS */
#define TLAYTECH_VENDOR_ID 0x20B9
#define TLAYTECH_PRODUCT_TEU800 0x1682

/* TOSHIBA PRODUCTS */
#define TOSHIBA_VENDOR_ID 0x0930
#define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302
#define TOSHIBA_PRODUCT_G450 0x0d45

#define ALINK_VENDOR_ID 0x1e0e
#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200

/* ALCATEL PRODUCTS */
#define ALCATEL_VENDOR_ID 0x1bbb
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define ALCATEL_PRODUCT_L100V 0x011e
#define ALCATEL_PRODUCT_L800MA 0x0203

#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
#define PIRELLI_PRODUCT_C100_2 0x1003
#define PIRELLI_PRODUCT_1004 0x1004
#define PIRELLI_PRODUCT_1005 0x1005
#define PIRELLI_PRODUCT_1006 0x1006
#define PIRELLI_PRODUCT_1007 0x1007
#define PIRELLI_PRODUCT_1008 0x1008
#define PIRELLI_PRODUCT_1009 0x1009
#define PIRELLI_PRODUCT_100A 0x100a
#define PIRELLI_PRODUCT_100B 0x100b
#define PIRELLI_PRODUCT_100C 0x100c
#define PIRELLI_PRODUCT_100D 0x100d
#define PIRELLI_PRODUCT_100E 0x100e
#define PIRELLI_PRODUCT_100F 0x100f
#define PIRELLI_PRODUCT_1011 0x1011
#define PIRELLI_PRODUCT_1012 0x1012

/* Airplus products */
#define AIRPLUS_VENDOR_ID 0x1011
#define AIRPLUS_PRODUCT_MCD650 0x3198

/* Longcheer/Longsung vendor ID; makes whitelabel devices that
 * many other vendors like 4G Systems, Alcatel, ChinaBird,
 * Mobidata, etc sell under their own brand names.
 */
#define LONGCHEER_VENDOR_ID 0x1c9e

/* 4G Systems products */
/* This is the 4G XS Stick W14 a.k.a.
 * Mobilcom Debitel Surf-Stick
 *
 * It seems to contain a Qualcomm QSC6240/6290 chipset
 */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
#define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01

/* Fujisoft products */
#define FUJISOFT_PRODUCT_FS040U 0x9b02

/* iBall 3.5G connect wireless modem */
#define IBALL_3_5G_CONNECT 0x9605

/* Zoom */
#define ZOOM_PRODUCT_4597 0x9607

/* SpeedUp SU9800 usb 3g modem */
#define SPEEDUP_PRODUCT_SU9800 0x9800

/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE81B 0x10f8
#define HAIER_PRODUCT_CE100 0x2009

/* Gemalto's Cinterion products (formerly Siemens) */
#define SIEMENS_VENDOR_ID 0x0681
#define CINTERION_VENDOR_ID 0x1e2d
#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
#define CINTERION_PRODUCT_HC25_MDM 0x0047
#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
#define CINTERION_PRODUCT_HC28_MDM 0x004C
#define CINTERION_PRODUCT_EU3_E 0x0051
#define CINTERION_PRODUCT_EU3_P 0x0052
#define CINTERION_PRODUCT_PH8 0x0053
#define CINTERION_PRODUCT_AHXX 0x0055
#define CINTERION_PRODUCT_PLXX 0x0060
#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0

/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD120 0xc001
#define OLIVETTI_PRODUCT_OLICARD140 0xc002
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
#define OLIVETTI_PRODUCT_OLICARD155 0xc004
#define OLIVETTI_PRODUCT_OLICARD200 0xc005
#define OLIVETTI_PRODUCT_OLICARD160 0xc00a
#define OLIVETTI_PRODUCT_OLICARD500 0xc00b

/* Celot products */
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801

/* Samsung products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889

/* YUGA products www.yuga-info.com gavin.kx@qq.com */
#define YUGA_VENDOR_ID 0x257A
#define YUGA_PRODUCT_CEM600 0x1601
#define YUGA_PRODUCT_CEM610 0x1602
#define YUGA_PRODUCT_CEM500 0x1603
#define YUGA_PRODUCT_CEM510 0x1604
#define YUGA_PRODUCT_CEM800 0x1605
#define YUGA_PRODUCT_CEM900 0x1606
#define YUGA_PRODUCT_CEU818 0x1607
#define YUGA_PRODUCT_CEU816 0x1608
#define YUGA_PRODUCT_CEU828 0x1609
#define YUGA_PRODUCT_CEU826 0x160A
#define YUGA_PRODUCT_CEU518 0x160B
#define YUGA_PRODUCT_CEU516 0x160C
#define YUGA_PRODUCT_CEU528 0x160D
#define YUGA_PRODUCT_CEU526 0x160F
#define YUGA_PRODUCT_CEU881 0x161F
#define YUGA_PRODUCT_CEU882 0x162F
#define YUGA_PRODUCT_CWM600 0x2601
#define YUGA_PRODUCT_CWM610 0x2602
#define YUGA_PRODUCT_CWM500 0x2603
#define YUGA_PRODUCT_CWM510 0x2604
#define YUGA_PRODUCT_CWM800 0x2605
#define YUGA_PRODUCT_CWM900 0x2606
#define YUGA_PRODUCT_CWU718 0x2607
#define YUGA_PRODUCT_CWU716 0x2608
#define YUGA_PRODUCT_CWU728 0x2609
#define YUGA_PRODUCT_CWU726 0x260A
#define YUGA_PRODUCT_CWU518 0x260B
#define YUGA_PRODUCT_CWU516 0x260C
#define YUGA_PRODUCT_CWU528 0x260D
#define YUGA_PRODUCT_CWU581 0x260E
#define YUGA_PRODUCT_CWU526 0x260F
#define YUGA_PRODUCT_CWU582 0x261F
#define YUGA_PRODUCT_CWU583 0x262F
#define YUGA_PRODUCT_CLM600 0x3601
#define YUGA_PRODUCT_CLM610 0x3602
#define YUGA_PRODUCT_CLM500 0x3603
#define YUGA_PRODUCT_CLM510 0x3604
#define YUGA_PRODUCT_CLM800 0x3605
#define YUGA_PRODUCT_CLM900 0x3606
#define YUGA_PRODUCT_CLU718 0x3607
#define YUGA_PRODUCT_CLU716 0x3608
#define YUGA_PRODUCT_CLU728 0x3609
#define YUGA_PRODUCT_CLU726 0x360A
#define YUGA_PRODUCT_CLU518 0x360B
#define YUGA_PRODUCT_CLU516 0x360C
#define YUGA_PRODUCT_CLU528 0x360D
#define YUGA_PRODUCT_CLU526 0x360F

/* Viettel products */
#define VIETTEL_VENDOR_ID 0x2262
#define VIETTEL_PRODUCT_VT1000 0x0002

/* ZD Incorporated */
#define ZD_VENDOR_ID 0x0685
#define ZD_PRODUCT_7000 0x7000

/* LG products */
#define LG_VENDOR_ID 0x1004
#define LG_PRODUCT_L02C 0x618f

/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
#define MEDIATEK_PRODUCT_7208_1COM 0x7101
#define MEDIATEK_PRODUCT_7208_2COM 0x7102
#define MEDIATEK_PRODUCT_7103_2COM 0x7103
#define MEDIATEK_PRODUCT_7106_2COM 0x7106
#define MEDIATEK_PRODUCT_FP_1COM 0x0003
#define MEDIATEK_PRODUCT_FP_2COM 0x0023
#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033

/* Cellient products */
#define CELLIENT_VENDOR_ID 0x2692
#define CELLIENT_PRODUCT_MEN200 0x9005

/* Hyundai Petatel Inc. products */
#define PETATEL_VENDOR_ID 0x1ff4
#define PETATEL_PRODUCT_NP10T_600A 0x600a
#define PETATEL_PRODUCT_NP10T_600E 0x600e

/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201

/* Changhong products */
#define CHANGHONG_VENDOR_ID 0x2077
#define CHANGHONG_PRODUCT_CH690 0x7001

/* Inovia */
#define INOVIA_VENDOR_ID 0x20a6
#define INOVIA_SEW858 0x1105

/* VIA Telecom */
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001

/* WeTelecom products */
#define WETELECOM_VENDOR_ID 0x22de
#define WETELECOM_PRODUCT_WMD200 0x6801
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803

/* Device flags */

/* Interface does not support modem-control requests */
#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)

/* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)

/* Interface must have two endpoints */
#define NUMEP2 BIT(16)
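/*
 * Worked example of the flag encoding (the values follow directly from
 * the macro definitions above): an entry carrying
 *
 *	.driver_info = NCTRL(0) | RSVD(1) | RSVD(2)
 *
 * evaluates to 0x0106 -- bit 8 marks interface 0 as not supporting
 * modem-control requests, while bits 1 and 2 mark interfaces 1 and 2
 * as reserved, i.e. not to be claimed as serial ports.
 */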
static const struct usb_device_id option_ids[] = {
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_NDIS_QUAD_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COBRA_BUS) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_VIPER_BUS) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GT_MAX_READY) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_LIGHT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_GT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUJI_MODEM_EX) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_KOI_MODEM) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_SCORPION_MODEM) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_LITE) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_GT) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_MODEM_EX) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_ETNA_KOI_MODEM) },
	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_GTM380_MODEM) },
	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q101) },
	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_Q111) },
	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
	{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(2) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) | RSVD(2) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) | RSVD(2) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff),	/* Huawei E1820 */
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) | RSVD(2) },
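	/*
	 * The wildcard entries below match on interface descriptors
	 * rather than product IDs: any Huawei interface with class 0xff
	 * is compared against these (subclass, protocol) pairs.  Judging
	 * from the table itself, the subclass byte (0x01..0x06) appears
	 * to select the device function and the protocol byte the port
	 * type, which is why the same protocol list repeats once per
	 * subclass.
	 */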
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },

	/* Motorola devices */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },	/* mdm6600 */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },	/* mdm9600 */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },	/* mdm ram dl */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },	/* mdm qc dl */

	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
	/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },

	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H02) },
	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_SKYPEPHONE_S2) },

	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD) },		/* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5500_MINICARD) },		/* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5505_MINICARD) },		/* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_EXPRESSCARD) },		/* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5510_EXPRESSCARD) },		/* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_SPRINT) },	/* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5700_MINICARD_TELUS) },	/* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_VZW) },		/* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_SPRINT) },	/* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5720_MINICARD_TELUS) },	/* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_CINGULAR) },	/* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_L) },	/* Dell Wireless HSDPA 5520 */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5520_MINICARD_GENERIC_I) },	/* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) },	/* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) },	/* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) },		/* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
	{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
	{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
	  .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },	/* ADU-E100, ADU-310 */
	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
	{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
	{ USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011, 0xff) },
	{ USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) },
	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)},	/* ZTE AC8700 */
	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff),	/* 4G LTE usb-modem U901 */
	  .driver_info = RSVD(3) },
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)},	/* Onda H600/ZTE MF330 */
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)},	/* ONYX 3G device */
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000),	/* SIMCom SIM5218 */
	  .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
	/* Quectel products using Qualcomm vendor ID */
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
	  .driver_info = RSVD(4) },
	/* Yuga products use Qualcomm vendor ID */
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
	  .driver_info = RSVD(1) | RSVD(4) },
	/* u-blox products using Qualcomm vendor ID */
	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
	  .driver_info = RSVD(1) | RSVD(3) },
	/* Quectel products using Quectel vendor ID */
	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
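	/*
	 * Note: the EP06 and EM12 above are each matched twice.  The
	 * NUMEP2 entry additionally requires the matched interface to
	 * have exactly two endpoints (and reserves interfaces 1-4),
	 * while the (0xff, 0, 0) entry matches interfaces whose
	 * subclass and protocol are zero.
	 */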
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
	  .driver_info = RSVD(3) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
	  .driver_info = RSVD(5) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
	  .driver_info = RSVD(0) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff),	/* Telit FN980 (rmnet) */
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff),	/* Telit FN980 (MBIM) */
	  .driver_info = NCTRL(0) | RSVD(1) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff),	/* Telit FN980 (RNDIS) */
	  .driver_info = NCTRL(2) | RSVD(3) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),	/* Telit FN980 (ECM) */
	  .driver_info = NCTRL(0) | RSVD(1) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
	  .driver_info = NCTRL(0) | RSVD(3) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),	/* Telit ME910 (ECM) */
	  .driver_info = NCTRL(0) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
	  .driver_info = NCTRL(0) | RSVD(1) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
	  .driver_info = NCTRL(0) | RSVD(1) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900),	/* Telit LN940 (QMI) */
	  .driver_info = NCTRL(0) | RSVD(1) },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
	  .driver_info = NCTRL(0) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(3) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(3) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
	  .driver_info = NCTRL(0) | NCTRL(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(5) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(1) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(4) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
	  .driver_info = RSVD(4) },
0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff), .driver_info = RSVD(6) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff), .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) }, /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 
0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff), .driver_info = RSVD(3) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 
0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff), .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff), .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ .driver_info = RSVD(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff), .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff), .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff), .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), .driver_info = RSVD(3) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 
0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 
0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff7, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff8, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff9, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfffb, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfffc, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MG880, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L), .driver_info = RSVD(3) | RSVD(4) | RSVD(5) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM), .driver_info = RSVD(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X), .driver_info = RSVD(3) | RSVD(4) | RSVD(5) }, { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X), .driver_info = RSVD(3) | RSVD(4) | RSVD(5) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4518) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4519) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E), .driver_info = RSVD(5) | RSVD(6) }, { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ .driver_info = RSVD(7) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), .driver_info = RSVD(6) }, { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), .driver_info = RSVD(6) }, { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), .driver_info = RSVD(3) }, { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), .driver_info = RSVD(5) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), .driver_info = RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA), .driver_info = RSVD(2) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = NCTRL(0) | NCTRL(1) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) }, {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U), .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), .driver_info = RSVD(3) }, { 
USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) }, /* Pirelli */ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012, 0xff) }, /* Cinterion */ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff), .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff), .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), .driver_info = RSVD(0) | RSVD(4) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), .driver_info = RSVD(6) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), .driver_info = RSVD(6) }, { 
USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), .driver_info = RSVD(6) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), .driver_info = RSVD(4) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM500) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM510) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM800) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM900) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU818) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU816) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU828) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU826) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU518) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU526) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM610) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM500) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM510) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM800) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWM900) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU718) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU716) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU728) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU726) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU518) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU526) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM610) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM500) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM510) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM800) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLM900) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU718) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU716) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU728) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU726) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU518) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) }, { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */ { 
USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), .driver_info = RSVD(4) }, { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ .driver_info = RSVD(4) }, { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d01, 0xff) }, /* D-Link DWM-156 (variant) */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d02, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d03, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff), /* D-Link DWM-158 */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */ .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */ .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 
0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) }, { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); /* The card has three separate interfaces, which the serial driver * recognizes separately, thus num_port=1. */ static struct usb_serial_driver option_1port_device = { .driver = { .owner = THIS_MODULE, .name = "option1", }, .description = "GSM modem (1-port)", .id_table = option_ids, .num_ports = 1, .probe = option_probe, .open = usb_wwan_open, .close = usb_wwan_close, .dtr_rts = usb_wwan_dtr_rts, .write = usb_wwan_write, .write_room = usb_wwan_write_room, .chars_in_buffer = usb_wwan_chars_in_buffer, .tiocmget = usb_wwan_tiocmget, .tiocmset = usb_wwan_tiocmset, .get_serial = usb_wwan_get_serial_info, .set_serial = usb_wwan_set_serial_info, .attach = option_attach, .release = option_release, .port_probe = usb_wwan_port_probe, .port_remove = usb_wwan_port_remove, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, #endif }; static struct usb_serial_driver * const serial_drivers[] = { &option_1port_device, NULL }; module_usb_serial_driver(serial_drivers, option_ids); static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_interface_descriptor *iface_desc = &serial->interface->cur_altsetting->desc; unsigned long device_flags = id->driver_info; /* Never bind to the CD-Rom emulation interface */ if (iface_desc->bInterfaceClass == USB_CLASS_MASS_STORAGE) return -ENODEV; /* * Don't bind reserved interfaces (like network ones) which often have * the same class/subclass/protocol as the serial interfaces. Look at * the Windows driver .INF files for reserved interface numbers. */ if (device_flags & RSVD(iface_desc->bInterfaceNumber)) return -ENODEV; /* * Allow matching on bNumEndpoints for devices whose interface numbers * can change (e.g. Quectel EP06). */ if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2) return -ENODEV; /* Store the device flags so we can use them during attach. */ usb_set_serial_data(serial, (void *)device_flags); return 0; } static int option_attach(struct usb_serial *serial) { struct usb_interface_descriptor *iface_desc; struct usb_wwan_intf_private *data; unsigned long device_flags; data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; /* Retrieve device flags stored at probe. 
	device_flags = (unsigned long)usb_get_serial_data(serial);

	iface_desc = &serial->interface->cur_altsetting->desc;

	if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
		data->use_send_setup = 1;

	spin_lock_init(&data->susp_lock);

	usb_set_serial_data(serial, data);

	return 0;
}

static void option_release(struct usb_serial *serial)
{
	struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);

	kfree(intfdata);
}

static void option_instat_callback(struct urb *urb)
{
	int err;
	int status = urb->status;
	struct usb_serial_port *port = urb->context;
	struct device *dev = &port->dev;
	struct usb_wwan_port_private *portdata =
					usb_get_serial_port_data(port);

	dev_dbg(dev, "%s: urb %p port %p has data %p\n", __func__, urb, port, portdata);

	if (status == 0) {
		struct usb_ctrlrequest *req_pkt =
				(struct usb_ctrlrequest *)urb->transfer_buffer;

		if (!req_pkt) {
			dev_dbg(dev, "%s: NULL req_pkt\n", __func__);
			return;
		}
		if ((req_pkt->bRequestType == 0xA1) &&
				(req_pkt->bRequest == 0x20)) {
			int old_dcd_state;
			unsigned char signals = *((unsigned char *)
					urb->transfer_buffer +
					sizeof(struct usb_ctrlrequest));

			dev_dbg(dev, "%s: signal x%x\n", __func__, signals);

			old_dcd_state = portdata->dcd_state;
			portdata->cts_state = 1;
			portdata->dcd_state = ((signals & 0x01) ? 1 : 0);
			portdata->dsr_state = ((signals & 0x02) ? 1 : 0);
			portdata->ri_state = ((signals & 0x08) ? 1 : 0);

			if (old_dcd_state && !portdata->dcd_state)
				tty_port_tty_hangup(&port->port, true);
		} else {
			dev_dbg(dev, "%s: type %x req %x\n", __func__,
				req_pkt->bRequestType, req_pkt->bRequest);
		}
	} else if (status == -ENOENT || status == -ESHUTDOWN) {
		dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
	} else
		dev_dbg(dev, "%s: error %d\n", __func__, status);

	/* Resubmit urb so we continue receiving IRQ data */
	if (status != -ESHUTDOWN && status != -ENOENT) {
		usb_mark_last_busy(port->serial->dev);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err)
			dev_dbg(dev, "%s: resubmit intr urb failed. (%d)\n",
				__func__, err);
	}
}

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
gpl-2.0
gskywolf/Firefly-RK3288-Kernel
drivers/mfd/rk818-irq.c
7
6450
/*
 * rk818-irq.c
 *
 * Author: zhangqing <zhangqing@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/mfd/rk818.h>
#include <linux/wakelock.h>
#include <linux/kthread.h>
#include <linux/irqdomain.h>
#include <linux/regmap.h>

static inline int irq_to_rk818_irq(struct rk818 *rk818, int irq)
{
	return (irq - rk818->chip_irq);
}

/*
 * This is a threaded IRQ handler so can access I2C/SPI.  Since all
 * interrupts are clear on read the IRQ line will be reasserted and
 * the physical IRQ will be handled again if another interrupt is
 * asserted while we run - in the normal course of events this is a
 * rare occurrence so we save I2C/SPI reads.  We're also assuming that
 * it's rare to get lots of interrupts firing simultaneously so try to
 * minimise I/O.
 */
static irqreturn_t rk818_irq(int irq, void *irq_data)
{
	struct rk818 *rk818 = irq_data;
	u32 irq_sts;
	u32 irq_mask;
	u8 reg;
	int i;

	wake_lock(&rk818->irq_wake);

	/* Read the two 8-bit status registers into a 16-bit status word */
	rk818_i2c_read(rk818, RK818_INT_STS_REG1, 1, &reg);
	irq_sts = reg;
	rk818_i2c_read(rk818, RK818_INT_STS_REG2, 1, &reg);
	irq_sts |= reg << 8;

	rk818_i2c_read(rk818, RK818_INT_STS_MSK_REG1, 1, &reg);
	irq_mask = reg;
	rk818_i2c_read(rk818, RK818_INT_STS_MSK_REG2, 1, &reg);
	irq_mask |= reg << 8;

	/* Only service interrupts that are both pending and unmasked */
	irq_sts &= ~irq_mask;

	if (!irq_sts) {
		wake_unlock(&rk818->irq_wake);
		return IRQ_NONE;
	}

	for (i = 0; i < rk818->irq_num; i++) {
		if (!(irq_sts & (1 << i)))
			continue;

		handle_nested_irq(rk818->irq_base + i);
	}

	/* Write the STS register back to clear IRQs we handled */
	reg = irq_sts & 0xFF;
	irq_sts >>= 8;
	rk818_i2c_write(rk818, RK818_INT_STS_REG1, 1, reg);
	reg = irq_sts & 0xFF;
	rk818_i2c_write(rk818, RK818_INT_STS_REG2, 1, reg);

	wake_unlock(&rk818->irq_wake);

	return IRQ_HANDLED;
}

static void rk818_irq_lock(struct irq_data *data)
{
	struct rk818 *rk818 = irq_data_get_irq_chip_data(data);

	mutex_lock(&rk818->irq_lock);
}

static void rk818_irq_sync_unlock(struct irq_data *data)
{
	struct rk818 *rk818 = irq_data_get_irq_chip_data(data);
	u32 reg_mask;
	u8 reg;

	rk818_i2c_read(rk818, RK818_INT_STS_MSK_REG1, 1, &reg);
	reg_mask = reg;
	rk818_i2c_read(rk818, RK818_INT_STS_MSK_REG2, 1, &reg);
	reg_mask |= reg << 8;

	/* The actual mask register writes are disabled in this source,
	 * so the cached irq_mask is computed but never pushed to hardware. */
	if (rk818->irq_mask != reg_mask) {
		reg = rk818->irq_mask & 0xff;
		// rk818_i2c_write(rk818, RK818_INT_STS_MSK_REG1, 1, reg);
		reg = rk818->irq_mask >> 8 & 0xff;
		// rk818_i2c_write(rk818, RK818_INT_STS_MSK_REG2, 1, reg);
	}
	mutex_unlock(&rk818->irq_lock);
}

static void rk818_irq_enable(struct irq_data *data)
{
	struct rk818 *rk818 = irq_data_get_irq_chip_data(data);

	rk818->irq_mask &= ~(1 << irq_to_rk818_irq(rk818, data->irq));
}

static void rk818_irq_disable(struct irq_data *data)
{
	struct rk818 *rk818 = irq_data_get_irq_chip_data(data);

	rk818->irq_mask |= (1 << irq_to_rk818_irq(rk818, data->irq));
}

#ifdef CONFIG_PM_SLEEP
static int rk818_irq_set_wake(struct irq_data *data, unsigned int enable)
{
	struct rk818 *rk818 = irq_data_get_irq_chip_data(data);

	return irq_set_irq_wake(rk818->chip_irq, enable);
}
#else
#define rk818_irq_set_wake NULL
#endif

static struct irq_chip rk818_irq_chip = {
	.name = "rk818",
	.irq_bus_lock = rk818_irq_lock,
	.irq_bus_sync_unlock = rk818_irq_sync_unlock,
rk818_irq_sync_unlock, .irq_disable = rk818_irq_disable, .irq_enable = rk818_irq_enable, .irq_set_wake = rk818_irq_set_wake, }; static int rk818_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct rk818 *rk818 = d->host_data; irq_set_chip_data(irq, rk818); irq_set_chip_and_handler(irq, &rk818_irq_chip, handle_edge_irq); irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif return 0; } static struct irq_domain_ops rk818_irq_domain_ops = { .map = rk818_irq_domain_map, }; int rk818_irq_init(struct rk818 *rk818, int irq,struct rk818_board *pdata) { struct irq_domain *domain; int ret,val,irq_type,flags; u8 reg; // printk("%s,line=%d\n", __func__,__LINE__); if (!irq) { dev_warn(rk818->dev, "No interrupt support, no core IRQ\n"); return 0; } /* Clear unattended interrupts */ rk818_i2c_read(rk818, RK818_INT_STS_REG1, 1, &reg); rk818_i2c_write(rk818, RK818_INT_STS_REG1, 1, reg); rk818_i2c_read(rk818, RK818_INT_STS_REG2, 1, &reg); rk818_i2c_write(rk818, RK818_INT_STS_REG2, 1, reg); rk818_i2c_read(rk818, RK818_RTC_STATUS_REG, 1, &reg); rk818_i2c_write(rk818, RK818_RTC_STATUS_REG, 1, reg);//clear alarm and timer interrupt /* Mask top level interrupts */ rk818->irq_mask = 0xFFFFFF; mutex_init(&rk818->irq_lock); wake_lock_init(&rk818->irq_wake, WAKE_LOCK_SUSPEND, "rk818_irq_wake"); rk818->irq_num = RK818_NUM_IRQ; rk818->irq_gpio = pdata->irq_gpio; if (rk818->irq_gpio && !rk818->chip_irq) { rk818->chip_irq = gpio_to_irq(rk818->irq_gpio); if (rk818->irq_gpio) { ret = gpio_request(rk818->irq_gpio, "rk818_pmic_irq"); if (ret < 0) { dev_err(rk818->dev, "Failed to request gpio %d with ret:" "%d\n", rk818->irq_gpio, ret); return IRQ_NONE; } gpio_direction_input(rk818->irq_gpio); val = gpio_get_value(rk818->irq_gpio); if (val){ irq_type = IRQ_TYPE_LEVEL_LOW; flags = IRQF_TRIGGER_FALLING; } else{ irq_type = IRQ_TYPE_LEVEL_HIGH; flags = IRQF_TRIGGER_RISING; } gpio_free(rk818->irq_gpio); pr_info("%s: rk818_pmic_irq=%x\n", __func__, val); } } domain = irq_domain_add_linear(NULL, RK818_NUM_IRQ, &rk818_irq_domain_ops, rk818); if (!domain) { dev_err(rk818->dev, "could not create irq domain\n"); return -ENODEV; } rk818->irq_domain = domain; ret = request_threaded_irq(rk818->chip_irq, NULL, rk818_irq, flags | IRQF_ONESHOT, "rk818", rk818); irq_set_irq_type(rk818->chip_irq, irq_type); enable_irq_wake(rk818->chip_irq); if (ret != 0) dev_err(rk818->dev, "Failed to request IRQ: %d\n", ret); return ret; } int rk818_irq_exit(struct rk818 *rk818) { if (rk818->chip_irq) free_irq(rk818->chip_irq, rk818); return 0; }
gpl-2.0
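The threaded handler above is a textbook PMIC IRQ demultiplexer: latch the status and mask registers over the bus, drop masked sources, fan out one nested handler per set bit, then ack exactly the bits that were seen. Below is a minimal, self-contained sketch of that read/mask/dispatch/ack pattern; the register names and the read_reg()/write_reg()/dispatch_nested_irq() helpers are hypothetical stand-ins for rk818_i2c_read()/rk818_i2c_write()/handle_nested_irq(), not the driver's real API.

#include <stdint.h>

#define NUM_IRQ 16

extern uint8_t read_reg(uint8_t addr);            /* assumed bus accessor */
extern void write_reg(uint8_t addr, uint8_t val); /* assumed bus accessor */
extern void dispatch_nested_irq(int hwirq);       /* stands in for handle_nested_irq() */

enum { STS1, STS2, MSK1, MSK2 };                  /* hypothetical register map */

int demux_irqs(void)
{
	/* Latch the 16-bit status and mask, low byte first */
	uint16_t sts  = read_reg(STS1) | (read_reg(STS2) << 8);
	uint16_t mask = read_reg(MSK1) | (read_reg(MSK2) << 8);
	int i, handled = 0;

	sts &= ~mask;                  /* ignore masked sources */
	if (!sts)
		return 0;              /* spurious wakeup: nothing pending */

	for (i = 0; i < NUM_IRQ; i++)  /* one nested handler per set bit */
		if (sts & (1u << i)) {
			dispatch_nested_irq(i);
			handled++;
		}

	/* Ack only what we saw, write-back-to-clear style */
	write_reg(STS1, sts & 0xff);
	write_reg(STS2, sts >> 8);
	return handled;
}

Acking only the latched bits matters: a source that asserted after the status read stays pending and re-triggers the line, which is why the handler can afford to be this simple.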
SerenityS/android_kernel_allwinner_a31
arch/arm/mach-sun6i/rf/wifi_pm_mtk6620.c
7
2755
/*
 * mtk6620 usb wifi power management API
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <mach/sys_config.h>
#include <mach/gpio.h>
#include <linux/regulator/consumer.h>
#include "wifi_pm.h"

#define mtk6620_msg(...)	do { printk("[mtk6620]: "__VA_ARGS__); } while (0)

static int mtk6620_powerup = 0;
static int mtk6620_suspend = 0;
static char *axp_name = NULL;

/* power control by axp */
static int mtk6620_module_power(int onoff)
{
	struct regulator *wifi_ldo = NULL;
	int ret = 0;

	mtk6620_msg("mtk6620 module power set by axp.\n");
	wifi_ldo = regulator_get(NULL, axp_name);
	if (IS_ERR_OR_NULL(wifi_ldo)) {
		mtk6620_msg("get power regulator failed.\n");
		return -ENODEV;
	}

	if (onoff) {
		mtk6620_msg("regulator on.\n");
		ret = regulator_set_voltage(wifi_ldo, 3000000, 3000000);
		if (ret < 0) {
			mtk6620_msg("regulator_set_voltage fail, return %d.\n", ret);
			goto out;
		}
		ret = regulator_enable(wifi_ldo);
		if (ret < 0) {
			mtk6620_msg("regulator_enable fail, return %d.\n", ret);
			goto out;
		}
	} else {
		mtk6620_msg("regulator off.\n");
		ret = regulator_disable(wifi_ldo);
		if (ret < 0) {
			mtk6620_msg("regulator_disable fail, return %d.\n", ret);
			goto out;
		}
	}
out:
	regulator_put(wifi_ldo);
	wifi_ldo = NULL;
	return ret;
}

void mtk6620_power(int mode, int *updown)
{
	if (mode) {
		if (*updown) {
			mtk6620_module_power(1);
			udelay(50);
			mtk6620_powerup = 1;
		} else {
			mtk6620_module_power(0);
			mtk6620_powerup = 0;
		}
		mtk6620_msg("mtk6620 wifi power state: %s\n", *updown ? "on" : "off");
	} else {
		*updown = mtk6620_powerup ? 1 : 0;
		mtk6620_msg("usb wifi power state: %s\n", mtk6620_powerup ? "on" : "off");
	}
	return;
}

static void mtk6620_standby(int instadby)
{
	if (instadby) {
		if (mtk6620_powerup) {
			/*
			 * Can't power off axp_dldo2 because the mtk6620 VRTC
			 * pin is wired to it; fix by huzhen 2013-1-23.
			 */
			mtk6620_suspend = 1;
		}
	} else {
		if (mtk6620_suspend)
			mtk6620_suspend = 0;
	}
	mtk6620_msg("usb wifi : %s\n", instadby ? "suspend" : "resume");
}

void mtk6620_gpio_init(void)
{
	script_item_u val;
	script_item_value_type_e type;
	struct wifi_pm_ops *ops = &wifi_select_pm_ops;

	mtk6620_msg("exec mtk6620_wifi_gpio_init\n");

	type = script_get_item(wifi_para, "wifi_power", &val);
	if (SCIRPT_ITEM_VALUE_TYPE_STR != type) {
		mtk6620_msg("failed to fetch wifi_power\n");
		return;
	}

	axp_name = val.str;
	mtk6620_msg("module power name %s\n", axp_name);

	mtk6620_powerup = 0;
	mtk6620_suspend = 0;
	ops->power = mtk6620_power;
	ops->standby = mtk6620_standby;
}
gpl-2.0
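mtk6620_module_power() is built around the standard Linux regulator consumer sequence: get a reference to the supply, pin its voltage, gate it on or off, and drop the reference. The sketch below isolates that sequence; "axp22_dldo2" is a made-up supply name for illustration — on a real board the name comes from the wifi_power key in sys_config, as mtk6620_gpio_init() shows.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int wifi_supply_set(int on)
{
	struct regulator *ldo = regulator_get(NULL, "axp22_dldo2");
	int ret;

	if (IS_ERR_OR_NULL(ldo))
		return -ENODEV;

	if (on) {
		/* Pin the rail to 3.0 V, then gate it on */
		ret = regulator_set_voltage(ldo, 3000000, 3000000);
		if (!ret)
			ret = regulator_enable(ldo);
	} else {
		ret = regulator_disable(ldo);
	}

	regulator_put(ldo); /* drop the reference taken by regulator_get() */
	return ret;
}

Taking and dropping the reference on every call, as the driver does, is simple but costs a lookup each time; a longer-lived consumer would normally hold the regulator from probe to remove.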
davidbau/davej
drivers/scsi/mca_53c9x.c
7
12366
/* mca_53c9x.c: Driver for the SCSI adapter found on NCR 35xx * (and maybe some other) Microchannel machines * * Code taken mostly from Cyberstorm SCSI drivers * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk) * * Hacked to work with the NCR MCA stuff by Tymm Twillman (tymm@computer.org) * * The CyberStorm SCSI driver (and this driver) is based on David S. Miller's * ESP driver * for the Sparc computers. * * Special thanks to Ken Stewart at Symbios (LSI) for helping with info on * the 86C01. I was on the brink of going ga-ga... * * Also thanks to Jesper Skov for helping me with info on how the Amiga * does things... */ /* * This is currently only set up to use one 53c9x card at a time; it could be * changed fairly easily to detect/use more than one, but I'm not too sure how * many cards that use the 53c9x on MCA systems there are (if, in fact, there * are cards that use them, other than the one built into some NCR systems)... * If anyone requests this, I'll throw it in, otherwise it's not worth the * effort. */ /* * Info on the 86C01 MCA interface chip at the bottom, if you care enough to * look. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/malloc.h> #include <linux/blk.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include "scsi.h" #include "hosts.h" #include "NCR53C9x.h" #include "mca_53c9x.h" #include <asm/dma.h> #include <linux/mca.h> #include <asm/irq.h> #include <asm/mca_dma.h> #include <asm/pgtable.h> static int dma_bytes_sent(struct NCR_ESP *, int); static int dma_can_transfer(struct NCR_ESP *, Scsi_Cmnd *); static void dma_dump_state(struct NCR_ESP *); static void dma_init_read(struct NCR_ESP *, __u32, int); static void dma_init_write(struct NCR_ESP *, __u32, int); static void dma_ints_off(struct NCR_ESP *); static void dma_ints_on(struct NCR_ESP *); static int dma_irq_p(struct NCR_ESP *); static int dma_ports_p(struct NCR_ESP *); static void dma_setup(struct NCR_ESP *, __u32, int, int); static void dma_led_on(struct NCR_ESP *); static void dma_led_off(struct NCR_ESP *); /* This is where all commands are put before they are transferred to the * 53c9x via PIO. */ volatile unsigned char cmd_buffer[16]; /* * We keep the structure that is used to access the registers on the 53c9x * here.
*/ static struct ESP_regs eregs; /***************************************************************** Detection */ int mca_esp_detect(Scsi_Host_Template *tpnt) { struct NCR_ESP *esp; static int io_port_by_pos[] = MCA_53C9X_IO_PORTS; int mca_53c9x_ids[] = MCA_53C9X_IDS; int *id_to_check = mca_53c9x_ids; int slot; int pos[3]; unsigned int tmp_io_addr; unsigned char tmp_byte; if (!MCA_bus) return 0; while (*id_to_check) { if ((slot = mca_find_adapter(*id_to_check, 0)) != MCA_NOTFOUND) { esp = esp_allocate(tpnt, (void *) NULL); pos[0] = mca_read_stored_pos(slot, 2); pos[1] = mca_read_stored_pos(slot, 3); pos[2] = mca_read_stored_pos(slot, 4); esp->eregs = &eregs; /* * IO port base is given in the first (non-ID) pos * register, like so: * * Bits 3 2 1 IO base * ---------------------------- * 0 0 0 <disabled> * 0 0 1 0x0240 * 0 1 0 0x0340 * 0 1 1 0x0400 * 1 0 0 0x0420 * 1 0 1 0x3240 * 1 1 0 0x8240 * 1 1 1 0xA240 */ tmp_io_addr = io_port_by_pos[(pos[0] & 0x0E) >> 1]; esp->eregs->io_addr = tmp_io_addr + 0x10; if (esp->eregs->io_addr == 0x0000) { printk("Adapter is disabled.\n"); break; } /* * IRQ is specified in bits 4 and 5: * * Bits 4 5 IRQ * ----------------------- * 0 0 3 * 0 1 5 * 1 0 7 * 1 1 9 */ esp->irq = ((pos[0] & 0x30) >> 3) + 3; /* * DMA channel is in the low 3 bits of the second * POS register */ esp->dma = pos[1] & 7; esp->slot = slot; if (request_irq(esp->irq, esp_intr, 0, "NCR 53c9x SCSI", esp_intr)) { printk("Unable to request IRQ %d.\n", esp->irq); esp_deallocate(esp); scsi_unregister(esp->ehost); return 0; } if (request_dma(esp->dma, "NCR 53c9x SCSI")) { printk("Unable to request DMA channel %d.\n", esp->dma); free_irq(esp->irq, esp_intr); esp_deallocate(esp); scsi_unregister(esp->ehost); return 0; } request_region(tmp_io_addr, 32, "NCR 53c9x SCSI"); /* * 86C01 handles DMA, IO mode, from address * (base + 0x0a) */ mca_disable_dma(esp->dma); mca_set_dma_io(esp->dma, tmp_io_addr + 0x0a); mca_enable_dma(esp->dma); /* Tell the 86C01 to give us interrupts */ tmp_byte = inb(tmp_io_addr + 0x02) | 0x40; outb(tmp_byte, tmp_io_addr + 0x02); /* * Scsi ID -- general purpose register, hi * 2 bits; add 4 to this number to get the * ID */ esp->scsi_id = ((pos[2] & 0xC0) >> 6) + 4; /* Do command transfer with programmed I/O */ esp->do_pio_cmds = 1; /* Required functions */ esp->dma_bytes_sent = &dma_bytes_sent; esp->dma_can_transfer = &dma_can_transfer; esp->dma_dump_state = &dma_dump_state; esp->dma_init_read = &dma_init_read; esp->dma_init_write = &dma_init_write; esp->dma_ints_off = &dma_ints_off; esp->dma_ints_on = &dma_ints_on; esp->dma_irq_p = &dma_irq_p; esp->dma_ports_p = &dma_ports_p; esp->dma_setup = &dma_setup; /* Optional functions */ esp->dma_barrier = 0; esp->dma_drain = 0; esp->dma_invalidate = 0; esp->dma_irq_entry = 0; esp->dma_irq_exit = 0; esp->dma_led_on = dma_led_on; esp->dma_led_off = dma_led_off; esp->dma_poll = 0; esp->dma_reset = 0; /* Set the command buffer */ esp->esp_command = (volatile unsigned char*) cmd_buffer; esp->esp_command_dvma = virt_to_bus(cmd_buffer); /* SCSI chip speed */ esp->cfreq = 25000000; /* Differential SCSI? I think not. 
*/ esp->diff = 0; esp_initialize(esp); printk(" Adapter found in slot %2d: io port 0x%x " "irq %d dma channel %d\n", slot + 1, tmp_io_addr, esp->irq, esp->dma); mca_set_adapter_name(slot, "NCR 53C9X SCSI Adapter"); mca_mark_as_used(slot); break; } id_to_check++; } return esps_in_use; } /******************************************************************* Release */ int mca_esp_release(struct Scsi_Host *host) { struct NCR_ESP *esp = (struct NCR_ESP *)host->hostdata; unsigned char tmp_byte; esp_deallocate(esp); /* * Tell the 86C01 to stop sending interrupts */ tmp_byte = inb(esp->eregs->io_addr - 0x0E); tmp_byte &= ~0x40; outb(tmp_byte, esp->eregs->io_addr - 0x0E); free_irq(esp->irq, esp_intr); free_dma(esp->dma); mca_mark_as_unused(esp->slot); return 0; } /************************************************************* DMA Functions */ static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count) { /* Ask the 53c9x. It knows. */ return fifo_count; } static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) { /* * The MCA dma channels can only do up to 128K bytes at a time. * (16 bit mode) */ unsigned long sz = sp->SCp.this_residual; if(sz > 0x20000) sz = 0x20000; return sz; } static void dma_dump_state(struct NCR_ESP *esp) { /* * Doesn't quite match up to the other drivers, but we do what we * can. */ ESPLOG(("esp%d: dma channel <%d>\n", esp->esp_id, esp->dma)); ESPLOG(("bytes left to dma: %d\n", mca_get_dma_residue(esp->dma))); } static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length) { unsigned long flags; save_flags(flags); cli(); mca_disable_dma(esp->dma); mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_16 | MCA_DMA_MODE_IO); mca_set_dma_addr(esp->dma, addr); mca_set_dma_count(esp->dma, length / 2); /* !!! */ mca_enable_dma(esp->dma); restore_flags(flags); } static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length) { unsigned long flags; save_flags(flags); cli(); mca_disable_dma(esp->dma); mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE | MCA_DMA_MODE_16 | MCA_DMA_MODE_IO); mca_set_dma_addr(esp->dma, addr); mca_set_dma_count(esp->dma, length / 2); /* !!! */ mca_enable_dma(esp->dma); restore_flags(flags); } static void dma_ints_off(struct NCR_ESP *esp) { /* * Tell the 'C01 to shut up. All interrupts are routed through it. */ outb(inb(esp->eregs->io_addr - 0x0E) & ~0x40, esp->eregs->io_addr - 0x0E); } static void dma_ints_on(struct NCR_ESP *esp) { /* * Ok. You can speak again. */ outb(inb(esp->eregs->io_addr - 0x0E) | 0x40, esp->eregs->io_addr - 0x0E); } static int dma_irq_p(struct NCR_ESP *esp) { /* * DaveM says that this should return a "yes" if there is an interrupt * or a DMA error occurred. I copied the Amiga driver's semantics, * though, because it seems to work and we can't really tell if * a DMA error happened. This gives the "yes" if the scsi chip * is sending an interrupt and no DMA activity is taking place */ return (!(inb(esp->eregs->io_addr - 0x04) & 1) && !(inb(esp->eregs->io_addr - 0x04) & 2) ); } static int dma_ports_p(struct NCR_ESP *esp) { /* * Check to see if interrupts are enabled on the 'C01 (in case abort * is entered multiple times, so we only do the abort once) */ return (inb(esp->eregs->io_addr - 0x0E) & 0x40) ? 1:0; } static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) { if(write){ dma_init_write(esp, addr, count); } else { dma_init_read(esp, addr, count); } } /* * These will not play nicely with other disk controllers that try to use the * disk active LED... 
but what can you do? Don't answer that. * * Stolen shamelessly from ibmmca.c -- IBM Microchannel SCSI adapter driver * */ static void dma_led_on(struct NCR_ESP *esp) { outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR); } static void dma_led_off(struct NCR_ESP *esp) { outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); } static Scsi_Host_Template driver_template = MCA_53C9X; #include "scsi_module.c" /* * OK, here's the goods I promised. The NCR 86C01 is an MCA interface chip * that handles enabling/disabling IRQ, dma interfacing, IO port selection * and other fun stuff. It takes up 16 addresses, and the chip it is * connected to gets the following 16. Registers are as follows: * * Offsets 0-1 : Card ID * * Offset 2 : Mode enable register -- * Bit 7 : Data Word width (1 = 16, 0 = 8) * Bit 6 : IRQ enable (1 = enabled) * Bits 5,4 : IRQ select * 0 0 : IRQ 3 * 0 1 : IRQ 5 * 1 0 : IRQ 7 * 1 1 : IRQ 9 * Bits 3-1 : Base Address * 0 0 0 : <disabled> * 0 0 1 : 0x0240 * 0 1 0 : 0x0340 * 0 1 1 : 0x0400 * 1 0 0 : 0x0420 * 1 0 1 : 0x3240 * 1 1 0 : 0x8240 * 1 1 1 : 0xA240 * Bit 0 : Card enable (1 = enabled) * * Offset 3 : DMA control register -- * Bit 7 : DMA enable (1 = enabled) * Bits 6,5 : Preempt Count Select (transfers to complete after * 'C01 has been preempted on MCA bus) * 0 0 : 0 * 0 1 : 1 * 1 0 : 3 * 1 1 : 7 * (all these wacky numbers; I'm sure there's a reason somewhere) * Bit 4 : Fairness enable (1 = fair bus priority) * Bits 3-0 : Arbitration level (0-15 consecutive) * * Offset 4 : General purpose register * Bits 7-3 : User definable (here, 7,6 are SCSI ID) * Bits 2-0 : reserved * * Offset 10 : DMA decode register (used for IO based DMA; also can do * PIO through this port) * * Offset 12 : Status * Bits 7-2 : reserved * Bit 1 : DMA pending (1 = pending) * Bit 0 : IRQ pending (0 = pending) * * Exciting, huh? * */
gpl-2.0
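The 86C01 register map documented at the end of the file is exactly what mca_esp_detect() decodes from the stored POS bytes: bits 3-1 of the mode register index the I/O base table, and bits 5,4 select the IRQ. Here is a small standalone sketch of that decode; io_port_by_pos mirrors what the MCA_53C9X_IO_PORTS table presumably contains, and in the real driver the POS byte comes from mca_read_stored_pos() rather than a hard-coded value.

#include <stdio.h>

static const unsigned io_port_by_pos[8] = {
	0x0000, /* disabled */
	0x0240, 0x0340, 0x0400, 0x0420, 0x3240, 0x8240, 0xA240
};

static void decode_pos(unsigned char pos0)
{
	unsigned io_base = io_port_by_pos[(pos0 & 0x0E) >> 1]; /* bits 3-1 */
	unsigned irq = ((pos0 & 0x30) >> 3) + 3;               /* bits 5,4 -> 3/5/7/9 */

	if (!io_base)
		printf("adapter disabled\n");
	else
		printf("io base 0x%04x, irq %u\n", io_base, irq);
}

int main(void)
{
	decode_pos(0x53); /* 0b01010011 -> base index 1 = 0x0240, IRQ 5 */
	return 0;
}

Note the irq formula works because (pos0 & 0x30) >> 3 yields 0, 2, 4 or 6, so adding 3 lands on the documented 3/5/7/9 sequence — the same trick the driver uses.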
auras76/MM_6.0_32.1.A.1.185
fs/btrfs/disk-io.c
263
110815
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include <linux/swap.h> #include <linux/radix-tree.h> #include <linux/writeback.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/crc32c.h> #include <linux/slab.h> #include <linux/migrate.h> #include <linux/ratelimit.h> #include <linux/uuid.h> #include <asm/unaligned.h> #include "compat.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" #include "print-tree.h" #include "async-thread.h" #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" #include "inode-map.h" #include "check-integrity.h" #include "rcu-string.h" #include "dev-replace.h" #include "raid56.h" #ifdef CONFIG_X86 #include <asm/cpufeature.h> #endif static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); static void free_fs_root(struct btrfs_root *root); static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, int read_only); static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, struct btrfs_root *root); static void btrfs_destroy_ordered_extents(struct btrfs_root *root); static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_root *root); static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t); static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root); static int btrfs_destroy_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark); static int btrfs_destroy_pinned_extent(struct btrfs_root *root, struct extent_io_tree *pinned_extents); static int btrfs_cleanup_transaction(struct btrfs_root *root); static void btrfs_error_commit_super(struct btrfs_root *root); /* * end_io_wq structs are used to do processing in task context when an IO is * complete. This is used during reads to verify checksums, and it is used * by writes to insert metadata for new file extents after IO is complete. */ struct end_io_wq { struct bio *bio; bio_end_io_t *end_io; void *private; struct btrfs_fs_info *info; int error; int metadata; struct list_head list; struct btrfs_work work; }; /* * async submit bios are used to offload expensive checksumming * onto the worker threads. They checksum file and metadata bios * just before they are sent down the IO stack. 
*/ struct async_submit_bio { struct inode *inode; struct bio *bio; struct list_head list; extent_submit_bio_hook_t *submit_bio_start; extent_submit_bio_hook_t *submit_bio_done; int rw; int mirror_num; unsigned long bio_flags; /* * bio_offset is optional, can be used if the pages in the bio * can't tell us where in the file the bio should go */ u64 bio_offset; struct btrfs_work work; int error; }; /* * Lockdep class keys for extent_buffer->lock's in this root. For a given * eb, the lockdep key is determined by the btrfs_root it belongs to and * the level the eb occupies in the tree. * * Different roots are used for different purposes and may nest inside each * other and they require separate keysets. As lockdep keys should be * static, assign keysets according to the purpose of the root as indicated * by btrfs_root->objectid. This ensures that all special purpose roots * have separate keysets. * * Lock-nesting across peer nodes is always done with the immediate parent * node locked thus preventing deadlock. As lockdep doesn't know this, use * subclass to avoid triggering lockdep warning in such cases. * * The key is set by the readpage_end_io_hook after the buffer has passed * csum validation but before the pages are unlocked. It is also set by * btrfs_init_new_buffer on freshly allocated blocks. * * We also add a check to make sure the highest level of the tree is the * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code * needs update as well. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC # if BTRFS_MAX_LEVEL != 8 # error # endif static struct btrfs_lockdep_keyset { u64 id; /* root objectid */ const char *name_stem; /* lock name stem */ char names[BTRFS_MAX_LEVEL + 1][20]; struct lock_class_key keys[BTRFS_MAX_LEVEL + 1]; } btrfs_lockdep_keysets[] = { { .id = BTRFS_ROOT_TREE_OBJECTID, .name_stem = "root" }, { .id = BTRFS_EXTENT_TREE_OBJECTID, .name_stem = "extent" }, { .id = BTRFS_CHUNK_TREE_OBJECTID, .name_stem = "chunk" }, { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" }, { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, { .id = 0, .name_stem = "tree" }, }; void __init btrfs_init_lockdep(void) { int i, j; /* initialize lockdep class names */ for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) { struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i]; for (j = 0; j < ARRAY_SIZE(ks->names); j++) snprintf(ks->names[j], sizeof(ks->names[j]), "btrfs-%s-%02d", ks->name_stem, j); } } void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) { struct btrfs_lockdep_keyset *ks; BUG_ON(level >= ARRAY_SIZE(ks->keys)); /* find the matching keyset, id 0 is the default entry */ for (ks = btrfs_lockdep_keysets; ks->id; ks++) if (ks->id == objectid) break; lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]); } #endif /* * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ static struct extent_map *btree_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; int ret; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (em) { em->bdev = 
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; read_unlock(&em_tree->lock); goto out; } read_unlock(&em_tree->lock); em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; } em->start = 0; em->len = (u64)-1; em->block_len = (u64)-1; em->block_start = 0; em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); if (ret == -EEXIST) { free_extent_map(em); em = lookup_extent_mapping(em_tree, start, len); if (!em) em = ERR_PTR(-EIO); } else if (ret) { free_extent_map(em); em = ERR_PTR(ret); } write_unlock(&em_tree->lock); out: return em; } u32 btrfs_csum_data(char *data, u32 seed, size_t len) { return crc32c(seed, data, len); } void btrfs_csum_final(u32 crc, char *result) { put_unaligned_le32(~crc, result); } /* * compute the csum for a btree block, and either verify it or write it * into the csum field of the block. */ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, int verify) { u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); char *result = NULL; unsigned long len; unsigned long cur_len; unsigned long offset = BTRFS_CSUM_SIZE; char *kaddr; unsigned long map_start; unsigned long map_len; int err; u32 crc = ~(u32)0; unsigned long inline_result; len = buf->len - offset; while (len > 0) { err = map_private_extent_buffer(buf, offset, 32, &kaddr, &map_start, &map_len); if (err) return 1; cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(kaddr + offset - map_start, crc, cur_len); len -= cur_len; offset += cur_len; } if (csum_size > sizeof(inline_result)) { result = kzalloc(csum_size * sizeof(char), GFP_NOFS); if (!result) return 1; } else { result = (char *)&inline_result; } btrfs_csum_final(crc, result); if (verify) { if (memcmp_extent_buffer(buf, result, 0, csum_size)) { u32 val; u32 found = 0; memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); printk_ratelimited(KERN_INFO "btrfs: %s checksum verify " "failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, (unsigned long long)buf->start, val, found, btrfs_header_level(buf)); if (result != (char *)&inline_result) kfree(result); return 1; } } else { write_extent_buffer(buf, result, 0, csum_size); } if (result != (char *)&inline_result) kfree(result); return 0; } /* * we can't consider a given block up to date unless the transid of the * block matches the transid in the parent node's pointer. This is how we * detect blocks that either didn't get written at all or got written * in the wrong place. */ static int verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid, int atomic) { struct extent_state *cached_state = NULL; int ret; if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; if (atomic) return -EAGAIN; lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 0, &cached_state); if (extent_buffer_uptodate(eb) && btrfs_header_generation(eb) == parent_transid) { ret = 0; goto out; } printk_ratelimited("parent transid verify failed on %llu wanted %llu " "found %llu\n", (unsigned long long)eb->start, (unsigned long long)parent_transid, (unsigned long long)btrfs_header_generation(eb)); ret = 1; clear_extent_buffer_uptodate(eb); out: unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, &cached_state, GFP_NOFS); return ret; } /* * Return 0 if the superblock checksum type matches the checksum value of that * algorithm. 
Pass the raw disk superblock data. */ static int btrfs_check_super_csum(char *raw_disk_sb) { struct btrfs_super_block *disk_sb = (struct btrfs_super_block *)raw_disk_sb; u16 csum_type = btrfs_super_csum_type(disk_sb); int ret = 0; if (csum_type == BTRFS_CSUM_TYPE_CRC32) { u32 crc = ~(u32)0; const int csum_size = sizeof(crc); char result[csum_size]; /* * The super_block structure does not span the whole * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space * is filled with zeros and is included in the checkum. */ crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); btrfs_csum_final(crc, result); if (memcmp(raw_disk_sb, result, csum_size)) ret = 1; if (ret && btrfs_super_generation(disk_sb) < 10) { printk(KERN_WARNING "btrfs: super block crcs don't match, older mkfs detected\n"); ret = 0; } } if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) { printk(KERN_ERR "btrfs: unsupported checksum algorithm %u\n", csum_type); ret = 1; } return ret; } /* * helper to read a given tree block, doing retries as required when * the checksums don't match and we have alternate mirrors to try. */ static int btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) { struct extent_io_tree *io_tree; int failed = 0; int ret; int num_copies = 0; int mirror_num = 0; int failed_mirror = 0; clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; while (1) { ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE, btree_get_extent, mirror_num); if (!ret) { if (!verify_parent_transid(io_tree, eb, parent_transid, 0)) break; else ret = -EIO; } /* * This buffer's crc is fine, but its contents are corrupted, so * there is no reason to read the other copies, they won't be * any less wrong. */ if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) break; num_copies = btrfs_num_copies(root->fs_info, eb->start, eb->len); if (num_copies == 1) break; if (!failed_mirror) { failed = 1; failed_mirror = eb->read_mirror; } mirror_num++; if (mirror_num == failed_mirror) mirror_num++; if (mirror_num > num_copies) break; } if (failed && !ret && failed_mirror) repair_eb_io_failure(root, eb, failed_mirror); return ret; } /* * checksum a dirty tree block before IO. 
This has extra checks to make sure * we only fill in the checksum field in the first page of a multi-page block */ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; u64 start = page_offset(page); u64 found_start; struct extent_buffer *eb; tree = &BTRFS_I(page->mapping->host)->io_tree; eb = (struct extent_buffer *)page->private; if (page != eb->pages[0]) return 0; found_start = btrfs_header_bytenr(eb); if (found_start != start) { WARN_ON(1); return 0; } if (!PageUptodate(page)) { WARN_ON(1); return 0; } csum_tree_block(root, eb, 0); return 0; } static int check_tree_block_fsid(struct btrfs_root *root, struct extent_buffer *eb) { struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; u8 fsid[BTRFS_UUID_SIZE]; int ret = 1; read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb), BTRFS_FSID_SIZE); while (fs_devices) { if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) { ret = 0; break; } fs_devices = fs_devices->seed; } return ret; } #define CORRUPT(reason, eb, root, slot) \ printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \ "root=%llu, slot=%d\n", reason, \ (unsigned long long)btrfs_header_bytenr(eb), \ (unsigned long long)root->objectid, slot) static noinline int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf) { struct btrfs_key key; struct btrfs_key leaf_key; u32 nritems = btrfs_header_nritems(leaf); int slot; if (nritems == 0) return 0; /* Check the 0 item */ if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root)) { CORRUPT("invalid item offset size pair", leaf, root, 0); return -EIO; } /* * Check to make sure each items keys are in the correct order and their * offsets make sense. We only have to loop through nritems-1 because * we check the current slot against the next slot, which verifies the * next slot's offset+size makes sense and that the current's slot * offset is correct. */ for (slot = 0; slot < nritems - 1; slot++) { btrfs_item_key_to_cpu(leaf, &leaf_key, slot); btrfs_item_key_to_cpu(leaf, &key, slot + 1); /* Make sure the keys are in the right order */ if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) { CORRUPT("bad key order", leaf, root, slot); return -EIO; } /* * Make sure the offset and ends are right, remember that the * item data starts at the end of the leaf and grows towards the * front. */ if (btrfs_item_offset_nr(leaf, slot) != btrfs_item_end_nr(leaf, slot + 1)) { CORRUPT("slot offset bad", leaf, root, slot); return -EIO; } /* * Check to make sure that we don't point outside of the leaf, * just incase all the items are consistent to eachother, but * all point outside of the leaf. */ if (btrfs_item_end_nr(leaf, slot) > BTRFS_LEAF_DATA_SIZE(root)) { CORRUPT("slot end outside of leaf", leaf, root, slot); return -EIO; } } return 0; } static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int mirror) { struct extent_io_tree *tree; u64 found_start; int found_level; struct extent_buffer *eb; struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; int ret = 0; int reads_done; if (!page->private) goto out; tree = &BTRFS_I(page->mapping->host)->io_tree; eb = (struct extent_buffer *)page->private; /* the pending IO might have been the only thing that kept this buffer * in memory. 
Make sure we have a ref for all this other checks */ extent_buffer_get(eb); reads_done = atomic_dec_and_test(&eb->io_pages); if (!reads_done) goto err; eb->read_mirror = mirror; if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { ret = -EIO; goto err; } found_start = btrfs_header_bytenr(eb); if (found_start != eb->start) { printk_ratelimited(KERN_INFO "btrfs bad tree block start " "%llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); ret = -EIO; goto err; } if (check_tree_block_fsid(root, eb)) { printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", (unsigned long long)eb->start); ret = -EIO; goto err; } found_level = btrfs_header_level(eb); if (found_level >= BTRFS_MAX_LEVEL) { btrfs_info(root->fs_info, "bad tree block level %d\n", (int)btrfs_header_level(eb)); ret = -EIO; goto err; } btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, found_level); ret = csum_tree_block(root, eb, 1); if (ret) { ret = -EIO; goto err; } /* * If this is a leaf block and it is corrupt, set the corrupt bit so * that we don't try and read the other copies of this block, just * return -EIO. */ if (found_level == 0 && check_leaf(root, eb)) { set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); ret = -EIO; } if (!ret) set_extent_buffer_uptodate(eb); err: if (reads_done && test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readahead_hook(root, eb, eb->start, ret); if (ret) { /* * our io error hook is going to dec the io pages * again, we have to make sure it has something * to decrement */ atomic_inc(&eb->io_pages); clear_extent_buffer_uptodate(eb); } free_extent_buffer(eb); out: return ret; } static int btree_io_failed_hook(struct page *page, int failed_mirror) { struct extent_buffer *eb; struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; eb = (struct extent_buffer *)page->private; set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); eb->read_mirror = failed_mirror; atomic_dec(&eb->io_pages); if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readahead_hook(root, eb, eb->start, -EIO); return -EIO; /* we fixed nothing */ } static void end_workqueue_bio(struct bio *bio, int err) { struct end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; fs_info = end_io_wq->info; end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; if (bio->bi_rw & REQ_WRITE) { if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) btrfs_queue_worker(&fs_info->endio_meta_write_workers, &end_io_wq->work); else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) btrfs_queue_worker(&fs_info->endio_freespace_worker, &end_io_wq->work); else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) btrfs_queue_worker(&fs_info->endio_raid56_workers, &end_io_wq->work); else btrfs_queue_worker(&fs_info->endio_write_workers, &end_io_wq->work); } else { if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) btrfs_queue_worker(&fs_info->endio_raid56_workers, &end_io_wq->work); else if (end_io_wq->metadata) btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); else btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); } } /* * For the metadata arg you want * * 0 - if data * 1 - if normal metadta * 2 - if writing to the free space cache area * 3 - raid parity work */ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata) { struct end_io_wq *end_io_wq; end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS); if (!end_io_wq) return -ENOMEM; end_io_wq->private = bio->bi_private; end_io_wq->end_io = bio->bi_end_io; 
end_io_wq->info = info; end_io_wq->error = 0; end_io_wq->bio = bio; end_io_wq->metadata = metadata; bio->bi_private = end_io_wq; bio->bi_end_io = end_workqueue_bio; return 0; } unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) { unsigned long limit = min_t(unsigned long, info->workers.max_workers, info->fs_devices->open_devices); return 256 * limit; } static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async; int ret; async = container_of(work, struct async_submit_bio, work); ret = async->submit_bio_start(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, async->bio_offset); if (ret) async->error = ret; } static void run_one_async_done(struct btrfs_work *work) { struct btrfs_fs_info *fs_info; struct async_submit_bio *async; int limit; async = container_of(work, struct async_submit_bio, work); fs_info = BTRFS_I(async->inode)->root->fs_info; limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; if (atomic_dec_return(&fs_info->nr_async_submits) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); /* If an error occured we just want to clean up the bio and move on */ if (async->error) { bio_endio(async->bio, async->error); return; } async->submit_bio_done(async->inode, async->rw, async->bio, async->mirror_num, async->bio_flags, async->bio_offset); } static void run_one_async_free(struct btrfs_work *work) { struct async_submit_bio *async; async = container_of(work, struct async_submit_bio, work); kfree(async); } int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done) { struct async_submit_bio *async; async = kmalloc(sizeof(*async), GFP_NOFS); if (!async) return -ENOMEM; async->inode = inode; async->rw = rw; async->bio = bio; async->mirror_num = mirror_num; async->submit_bio_start = submit_bio_start; async->submit_bio_done = submit_bio_done; async->work.func = run_one_async_start; async->work.ordered_func = run_one_async_done; async->work.ordered_free = run_one_async_free; async->work.flags = 0; async->bio_flags = bio_flags; async->bio_offset = bio_offset; async->error = 0; atomic_inc(&fs_info->nr_async_submits); if (rw & REQ_SYNC) btrfs_set_work_high_prio(&async->work); btrfs_queue_worker(&fs_info->workers, &async->work); while (atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { wait_event(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) == 0)); } return 0; } static int btree_csum_one_bio(struct bio *bio) { struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; struct btrfs_root *root; int ret = 0; WARN_ON(bio->bi_vcnt <= 0); while (bio_index < bio->bi_vcnt) { root = BTRFS_I(bvec->bv_page->mapping->host)->root; ret = csum_dirty_buffer(root, bvec->bv_page); if (ret) break; bio_index++; bvec++; } return ret; } static int __btree_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) { /* * when we're called for a write, we're already in the async * submission context. 
Just jump into btrfs_map_bio */ return btree_csum_one_bio(bio); } static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) { int ret; /* * when we're called for a write, we're already in the async * submission context. Just jump into btrfs_map_bio */ ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); if (ret) bio_endio(bio, ret); return ret; } static int check_async_write(struct inode *inode, unsigned long bio_flags) { if (bio_flags & EXTENT_BIO_TREE_LOG) return 0; #ifdef CONFIG_X86 if (cpu_has_xmm4_2) return 0; #endif return 1; } static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset) { int async = check_async_write(inode, bio_flags); int ret; if (!(rw & REQ_WRITE)) { /* * called for a read, do the setup so that checksum validation * can happen in the async kernel threads */ ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, bio, 1); if (ret) goto out_w_error; ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 0); } else if (!async) { ret = btree_csum_one_bio(bio); if (ret) goto out_w_error; ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 0); } else { /* * kthread helpers are used to submit writes so that * checksumming can happen in parallel across all CPUs */ ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, 0, bio_offset, __btree_submit_bio_start, __btree_submit_bio_done); } if (ret) { out_w_error: bio_endio(bio, ret); } return ret; } #ifdef CONFIG_MIGRATION static int btree_migratepage(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode) { /* * we can't safely write a btree page from here, * we haven't done the locking hook */ if (PageDirty(page)) return -EAGAIN; /* * Buffers may be managed in a filesystem specific way. * We must have no buffers or drop them. 
*/ if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) return -EAGAIN; return migrate_page(mapping, newpage, page, mode); } #endif static int btree_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct extent_io_tree *tree; struct btrfs_fs_info *fs_info; int ret; tree = &BTRFS_I(mapping->host)->io_tree; if (wbc->sync_mode == WB_SYNC_NONE) { if (wbc->for_kupdate) return 0; fs_info = BTRFS_I(mapping->host)->root->fs_info; /* this is a bit racy, but that's ok */ ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes, BTRFS_DIRTY_METADATA_THRESH); if (ret < 0) return 0; } return btree_write_cache_pages(mapping, wbc); } static int btree_readpage(struct file *file, struct page *page) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; return extent_read_full_page(tree, page, btree_get_extent, 0); } static int btree_releasepage(struct page *page, gfp_t gfp_flags) { if (PageWriteback(page) || PageDirty(page)) return 0; return try_release_extent_buffer(page); } static void btree_invalidatepage(struct page *page, unsigned long offset) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; extent_invalidatepage(tree, page, offset); btree_releasepage(page, GFP_NOFS); if (PagePrivate(page)) { printk(KERN_WARNING "btrfs warning page private not zero " "on page %llu\n", (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); } } static int btree_set_page_dirty(struct page *page) { #ifdef DEBUG struct extent_buffer *eb; BUG_ON(!PagePrivate(page)); eb = (struct extent_buffer *)page->private; BUG_ON(!eb); BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); BUG_ON(!atomic_read(&eb->refs)); btrfs_assert_tree_locked(eb); #endif return __set_page_dirty_nobuffers(page); } static const struct address_space_operations btree_aops = { .readpage = btree_readpage, .writepages = btree_writepages, .releasepage = btree_releasepage, .invalidatepage = btree_invalidatepage, #ifdef CONFIG_MIGRATION .migratepage = btree_migratepage, #endif .set_page_dirty = btree_set_page_dirty, }; int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, u64 parent_transid) { struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; int ret = 0; buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return 0; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, WAIT_NONE, btree_get_extent, 0); free_extent_buffer(buf); return ret; } int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, int mirror_num, struct extent_buffer **eb) { struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; int ret; buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return 0; set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK, btree_get_extent, mirror_num); if (ret) { free_extent_buffer(buf); return ret; } if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { free_extent_buffer(buf); return -EIO; } else if (extent_buffer_uptodate(buf)) { *eb = buf; } else { free_extent_buffer(buf); } return 0; } struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) { struct inode *btree_inode = root->fs_info->btree_inode; struct extent_buffer *eb; eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, 
bytenr, blocksize); return eb; } struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) { struct inode *btree_inode = root->fs_info->btree_inode; struct extent_buffer *eb; eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, bytenr, blocksize); return eb; } int btrfs_write_tree_block(struct extent_buffer *buf) { return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, buf->start + buf->len - 1); } int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) { return filemap_fdatawait_range(buf->pages[0]->mapping, buf->start, buf->start + buf->len - 1); } struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, u64 parent_transid) { struct extent_buffer *buf = NULL; int ret; buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return NULL; ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); return buf; } void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf) { struct btrfs_fs_info *fs_info = root->fs_info; if (btrfs_header_generation(buf) == fs_info->running_transaction->transid) { btrfs_assert_tree_locked(buf); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) { __percpu_counter_add(&fs_info->dirty_metadata_bytes, -buf->len, fs_info->dirty_metadata_batch); /* ugh, clear_extent_buffer_dirty needs to lock the page */ btrfs_set_lock_blocking(buf); clear_extent_buffer_dirty(buf); } } } static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, u32 stripesize, struct btrfs_root *root, struct btrfs_fs_info *fs_info, u64 objectid) { root->node = NULL; root->commit_root = NULL; root->sectorsize = sectorsize; root->nodesize = nodesize; root->leafsize = leafsize; root->stripesize = stripesize; root->ref_cows = 0; root->track_dirty = 0; root->in_radix = 0; root->orphan_item_inserted = 0; root->orphan_cleanup_state = 0; root->objectid = objectid; root->last_trans = 0; root->highest_objectid = 0; root->name = NULL; root->inode_tree = RB_ROOT; INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; root->orphan_block_rsv = NULL; INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->root_list); INIT_LIST_HEAD(&root->logged_list[0]); INIT_LIST_HEAD(&root->logged_list[1]); spin_lock_init(&root->orphan_lock); spin_lock_init(&root->inode_lock); spin_lock_init(&root->accounting_lock); spin_lock_init(&root->log_extents_lock[0]); spin_lock_init(&root->log_extents_lock[1]); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); atomic_set(&root->log_commit[0], 0); atomic_set(&root->log_commit[1], 0); atomic_set(&root->log_writers, 0); atomic_set(&root->log_batch, 0); atomic_set(&root->orphan_inodes, 0); root->log_transid = 0; root->last_log_commit = 0; extent_io_tree_init(&root->dirty_log_pages, fs_info->btree_inode->i_mapping); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); memset(&root->root_kobj, 0, sizeof(root->root_kobj)); root->defrag_trans_start = fs_info->generation; init_completion(&root->kobj_unregister); root->defrag_running = 0; root->root_key.objectid = objectid; root->anon_dev = 0; spin_lock_init(&root->root_item_lock); } static int __must_check find_and_setup_root(struct btrfs_root *tree_root, struct 
btrfs_fs_info *fs_info, u64 objectid, struct btrfs_root *root) { int ret; u32 blocksize; u64 generation; __setup_root(tree_root->nodesize, tree_root->leafsize, tree_root->sectorsize, tree_root->stripesize, root, fs_info, objectid); ret = btrfs_find_last_root(tree_root, objectid, &root->root_item, &root->root_key); if (ret > 0) return -ENOENT; else if (ret < 0) return ret; generation = btrfs_root_generation(&root->root_item); blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); root->commit_root = NULL; root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), blocksize, generation); if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) { free_extent_buffer(root->node); root->node = NULL; return -EIO; } root->commit_root = btrfs_root_node(root); return 0; } static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS); if (root) root->fs_info = fs_info; return root; } struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 objectid) { struct extent_buffer *leaf; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root; struct btrfs_key key; int ret = 0; u64 bytenr; uuid_le uuid; root = btrfs_alloc_root(fs_info); if (!root) return ERR_PTR(-ENOMEM); __setup_root(tree_root->nodesize, tree_root->leafsize, tree_root->sectorsize, tree_root->stripesize, root, fs_info, objectid); root->root_key.objectid = objectid; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = 0; leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, objectid, NULL, 0, 0, 0); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); leaf = NULL; goto fail; } bytenr = leaf->start; memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(leaf, leaf->start); btrfs_set_header_generation(leaf, trans->transid); btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(leaf, objectid); root->node = leaf; write_extent_buffer(leaf, fs_info->fsid, (unsigned long)btrfs_header_fsid(leaf), BTRFS_FSID_SIZE); write_extent_buffer(leaf, fs_info->chunk_tree_uuid, (unsigned long)btrfs_header_chunk_tree_uuid(leaf), BTRFS_UUID_SIZE); btrfs_mark_buffer_dirty(leaf); root->commit_root = btrfs_root_node(root); root->track_dirty = 1; root->root_item.flags = 0; root->root_item.byte_limit = 0; btrfs_set_root_bytenr(&root->root_item, leaf->start); btrfs_set_root_generation(&root->root_item, trans->transid); btrfs_set_root_level(&root->root_item, 0); btrfs_set_root_refs(&root->root_item, 1); btrfs_set_root_used(&root->root_item, leaf->len); btrfs_set_root_last_snapshot(&root->root_item, 0); btrfs_set_root_dirid(&root->root_item, 0); uuid_le_gen(&uuid); memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE); root->root_item.drop_level = 0; key.objectid = objectid; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = 0; ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item); if (ret) goto fail; btrfs_tree_unlock(leaf); return root; fail: if (leaf) { btrfs_tree_unlock(leaf); free_extent_buffer(leaf); } kfree(root); return ERR_PTR(ret); } static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { struct btrfs_root *root; struct btrfs_root *tree_root = fs_info->tree_root; struct extent_buffer *leaf; root = btrfs_alloc_root(fs_info); if (!root) return ERR_PTR(-ENOMEM); __setup_root(tree_root->nodesize, tree_root->leafsize, tree_root->sectorsize, 
tree_root->stripesize, root, fs_info, BTRFS_TREE_LOG_OBJECTID); root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; /* * log trees do not get reference counted because they go away * before a real commit is actually done. They do store pointers * to file data extents, and those reference counts still get * updated (along with back refs to the log tree). */ root->ref_cows = 0; leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0); if (IS_ERR(leaf)) { kfree(root); return ERR_CAST(leaf); } memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(leaf, leaf->start); btrfs_set_header_generation(leaf, trans->transid); btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID); root->node = leaf; write_extent_buffer(root->node, root->fs_info->fsid, (unsigned long)btrfs_header_fsid(root->node), BTRFS_FSID_SIZE); btrfs_mark_buffer_dirty(root->node); btrfs_tree_unlock(root->node); return root; } int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { struct btrfs_root *log_root; log_root = alloc_log_tree(trans, fs_info); if (IS_ERR(log_root)) return PTR_ERR(log_root); WARN_ON(fs_info->log_root_tree); fs_info->log_root_tree = log_root; return 0; } int btrfs_add_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_root *log_root; struct btrfs_inode_item *inode_item; log_root = alloc_log_tree(trans, root->fs_info); if (IS_ERR(log_root)) return PTR_ERR(log_root); log_root->last_trans = trans->transid; log_root->root_key.offset = root->root_key.objectid; inode_item = &log_root->root_item.inode; inode_item->generation = cpu_to_le64(1); inode_item->size = cpu_to_le64(3); inode_item->nlink = cpu_to_le32(1); inode_item->nbytes = cpu_to_le64(root->leafsize); inode_item->mode = cpu_to_le32(S_IFDIR | 0755); btrfs_set_root_node(&log_root->root_item, log_root->node); WARN_ON(root->log_root); root->log_root = log_root; root->log_transid = 0; root->last_log_commit = 0; return 0; } struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_key *location) { struct btrfs_root *root; struct btrfs_fs_info *fs_info = tree_root->fs_info; struct btrfs_path *path; struct extent_buffer *l; u64 generation; u32 blocksize; int ret = 0; int slot; root = btrfs_alloc_root(fs_info); if (!root) return ERR_PTR(-ENOMEM); if (location->offset == (u64)-1) { ret = find_and_setup_root(tree_root, fs_info, location->objectid, root); if (ret) { kfree(root); return ERR_PTR(ret); } goto out; } __setup_root(tree_root->nodesize, tree_root->leafsize, tree_root->sectorsize, tree_root->stripesize, root, fs_info, location->objectid); path = btrfs_alloc_path(); if (!path) { kfree(root); return ERR_PTR(-ENOMEM); } ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0); if (ret == 0) { l = path->nodes[0]; slot = path->slots[0]; btrfs_read_root_item(l, slot, &root->root_item); memcpy(&root->root_key, location, sizeof(*location)); } btrfs_free_path(path); if (ret) { kfree(root); if (ret > 0) ret = -ENOENT; return ERR_PTR(ret); } generation = btrfs_root_generation(&root->root_item); blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), blocksize, generation); if (!root->node || !extent_buffer_uptodate(root->node)) { ret = (!root->node) ? 
-ENOMEM : -EIO; free_extent_buffer(root->node); kfree(root); return ERR_PTR(ret); } root->commit_root = btrfs_root_node(root); out: if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { root->ref_cows = 1; btrfs_check_and_init_root_item(&root->root_item); } return root; } struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location) { struct btrfs_root *root; int ret; if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) return fs_info->tree_root; if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID) return fs_info->extent_root; if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID) return fs_info->chunk_root; if (location->objectid == BTRFS_DEV_TREE_OBJECTID) return fs_info->dev_root; if (location->objectid == BTRFS_CSUM_TREE_OBJECTID) return fs_info->csum_root; if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID) return fs_info->quota_root ? fs_info->quota_root : ERR_PTR(-ENOENT); again: spin_lock(&fs_info->fs_roots_radix_lock); root = radix_tree_lookup(&fs_info->fs_roots_radix, (unsigned long)location->objectid); spin_unlock(&fs_info->fs_roots_radix_lock); if (root) return root; root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location); if (IS_ERR(root)) return root; root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), GFP_NOFS); if (!root->free_ino_pinned || !root->free_ino_ctl) { ret = -ENOMEM; goto fail; } btrfs_init_free_ino_ctl(root); mutex_init(&root->fs_commit_mutex); spin_lock_init(&root->cache_lock); init_waitqueue_head(&root->cache_wait); ret = get_anon_bdev(&root->anon_dev); if (ret) goto fail; if (btrfs_root_refs(&root->root_item) == 0) { ret = -ENOENT; goto fail; } ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid); if (ret < 0) goto fail; if (ret == 0) root->orphan_item_inserted = 1; ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); if (ret) goto fail; spin_lock(&fs_info->fs_roots_radix_lock); ret = radix_tree_insert(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, root); if (ret == 0) root->in_radix = 1; spin_unlock(&fs_info->fs_roots_radix_lock); radix_tree_preload_end(); if (ret) { if (ret == -EEXIST) { free_fs_root(root); goto again; } goto fail; } ret = btrfs_find_dead_roots(fs_info->tree_root, root->root_key.objectid); WARN_ON(ret); return root; fail: free_fs_root(root); return ERR_PTR(ret); } static int btrfs_congested_fn(void *congested_data, int bdi_bits) { struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; int ret = 0; struct btrfs_device *device; struct backing_dev_info *bdi; rcu_read_lock(); list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; bdi = blk_get_backing_dev_info(device->bdev); if (bdi && bdi_congested(bdi, bdi_bits)) { ret = 1; break; } } rcu_read_unlock(); return ret; } /* * If this fails, caller must call bdi_destroy() to get rid of the * bdi again. */ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) { int err; bdi->capabilities = BDI_CAP_MAP_COPY; err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY); if (err) return err; bdi->ra_pages = default_backing_dev_info.ra_pages; bdi->congested_fn = btrfs_congested_fn; bdi->congested_data = info; return 0; } /* * called by the kthread helper functions to finally call the bio end_io * functions. 
This is where read checksum verification actually happens */ static void end_workqueue_fn(struct btrfs_work *work) { struct bio *bio; struct end_io_wq *end_io_wq; struct btrfs_fs_info *fs_info; int error; end_io_wq = container_of(work, struct end_io_wq, work); bio = end_io_wq->bio; fs_info = end_io_wq->info; error = end_io_wq->error; bio->bi_private = end_io_wq->private; bio->bi_end_io = end_io_wq->end_io; kfree(end_io_wq); bio_endio(bio, error); } static int cleaner_kthread(void *arg) { struct btrfs_root *root = arg; do { int again = 0; if (!(root->fs_info->sb->s_flags & MS_RDONLY) && down_read_trylock(&root->fs_info->sb->s_umount)) { if (mutex_trylock(&root->fs_info->cleaner_mutex)) { btrfs_run_delayed_iputs(root); again = btrfs_clean_one_deleted_snapshot(root); mutex_unlock(&root->fs_info->cleaner_mutex); } btrfs_run_defrag_inodes(root->fs_info); up_read(&root->fs_info->sb->s_umount); } if (!try_to_freeze() && !again) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule(); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); return 0; } static int transaction_kthread(void *arg) { struct btrfs_root *root = arg; struct btrfs_trans_handle *trans; struct btrfs_transaction *cur; u64 transid; unsigned long now; unsigned long delay; bool cannot_commit; do { cannot_commit = false; delay = HZ * 30; mutex_lock(&root->fs_info->transaction_kthread_mutex); spin_lock(&root->fs_info->trans_lock); cur = root->fs_info->running_transaction; if (!cur) { spin_unlock(&root->fs_info->trans_lock); goto sleep; } now = get_seconds(); if (!cur->blocked && (now < cur->start_time || now - cur->start_time < 30)) { spin_unlock(&root->fs_info->trans_lock); delay = HZ * 5; goto sleep; } transid = cur->transid; spin_unlock(&root->fs_info->trans_lock); /* If the file system is aborted, this will always fail. */ trans = btrfs_attach_transaction(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT) cannot_commit = true; goto sleep; } if (transid == trans->transid) { btrfs_commit_transaction(trans, root); } else { btrfs_end_transaction(trans, root); } sleep: wake_up_process(root->fs_info->cleaner_kthread); mutex_unlock(&root->fs_info->transaction_kthread_mutex); if (!try_to_freeze()) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop() && (!btrfs_transaction_blocked(root->fs_info) || cannot_commit)) schedule_timeout(delay); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); return 0; } /* * this will find the highest generation in the array of * root backups. The index of the highest array is returned, * or -1 if we can't find anything. * * We check to make sure the array is valid by comparing the * generation of the latest root in the array with the generation * in the super block. If they don't match we pitch it. */ static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) { u64 cur; int newest_index = -1; struct btrfs_root_backup *root_backup; int i; for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { root_backup = info->super_copy->super_roots + i; cur = btrfs_backup_tree_root_gen(root_backup); if (cur == newest_gen) newest_index = i; } /* check to see if we actually wrapped around */ if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) { root_backup = info->super_copy->super_roots; cur = btrfs_backup_tree_root_gen(root_backup); if (cur == newest_gen) newest_index = 0; } return newest_index; } /* * find the oldest backup so we know where to store new entries * in the backup array. 
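 * The array is treated as a ring, so the slot after the newest entry is
 * by definition the oldest one.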
This will set the backup_root_index * field in the fs_info struct */ static void find_oldest_super_backup(struct btrfs_fs_info *info, u64 newest_gen) { int newest_index = -1; newest_index = find_newest_super_backup(info, newest_gen); /* if there was garbage in there, just move along */ if (newest_index == -1) { info->backup_root_index = 0; } else { info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS; } } /* * copy all the root pointers into the super backup array. * this will bump the backup pointer by one when it is * done */ static void backup_super_roots(struct btrfs_fs_info *info) { int next_backup; struct btrfs_root_backup *root_backup; int last_backup; next_backup = info->backup_root_index; last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) % BTRFS_NUM_BACKUP_ROOTS; /* * just overwrite the last backup if we're at the same generation * this happens only at umount */ root_backup = info->super_for_commit->super_roots + last_backup; if (btrfs_backup_tree_root_gen(root_backup) == btrfs_header_generation(info->tree_root->node)) next_backup = last_backup; root_backup = info->super_for_commit->super_roots + next_backup; /* * make sure all of our padding and empty slots get zero filled * regardless of which ones we use today */ memset(root_backup, 0, sizeof(*root_backup)); info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); btrfs_set_backup_tree_root_gen(root_backup, btrfs_header_generation(info->tree_root->node)); btrfs_set_backup_tree_root_level(root_backup, btrfs_header_level(info->tree_root->node)); btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); btrfs_set_backup_chunk_root_gen(root_backup, btrfs_header_generation(info->chunk_root->node)); btrfs_set_backup_chunk_root_level(root_backup, btrfs_header_level(info->chunk_root->node)); btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start); btrfs_set_backup_extent_root_gen(root_backup, btrfs_header_generation(info->extent_root->node)); btrfs_set_backup_extent_root_level(root_backup, btrfs_header_level(info->extent_root->node)); /* * we might commit during log recovery, which happens before we set * the fs_root. Make sure it is valid before we fill it in. 
*/ if (info->fs_root && info->fs_root->node) { btrfs_set_backup_fs_root(root_backup, info->fs_root->node->start); btrfs_set_backup_fs_root_gen(root_backup, btrfs_header_generation(info->fs_root->node)); btrfs_set_backup_fs_root_level(root_backup, btrfs_header_level(info->fs_root->node)); } btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); btrfs_set_backup_dev_root_gen(root_backup, btrfs_header_generation(info->dev_root->node)); btrfs_set_backup_dev_root_level(root_backup, btrfs_header_level(info->dev_root->node)); btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start); btrfs_set_backup_csum_root_gen(root_backup, btrfs_header_generation(info->csum_root->node)); btrfs_set_backup_csum_root_level(root_backup, btrfs_header_level(info->csum_root->node)); btrfs_set_backup_total_bytes(root_backup, btrfs_super_total_bytes(info->super_copy)); btrfs_set_backup_bytes_used(root_backup, btrfs_super_bytes_used(info->super_copy)); btrfs_set_backup_num_devices(root_backup, btrfs_super_num_devices(info->super_copy)); /* * if we don't copy this out to the super_copy, it won't get remembered * for the next commit */ memcpy(&info->super_copy->super_roots, &info->super_for_commit->super_roots, sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); } /* * this copies info out of the root backup array and back into * the in-memory super block. It is meant to help iterate through * the array, so you send it the number of backups you've already * tried and the last backup index you used. * * this returns -1 when it has tried all the backups */ static noinline int next_root_backup(struct btrfs_fs_info *info, struct btrfs_super_block *super, int *num_backups_tried, int *backup_index) { struct btrfs_root_backup *root_backup; int newest = *backup_index; if (*num_backups_tried == 0) { u64 gen = btrfs_super_generation(super); newest = find_newest_super_backup(info, gen); if (newest == -1) return -1; *backup_index = newest; *num_backups_tried = 1; } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) { /* we've tried all the backups, all done */ return -1; } else { /* jump to the next oldest backup */ newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) % BTRFS_NUM_BACKUP_ROOTS; *backup_index = newest; *num_backups_tried += 1; } root_backup = super->super_roots + newest; btrfs_set_super_generation(super, btrfs_backup_tree_root_gen(root_backup)); btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); btrfs_set_super_root_level(super, btrfs_backup_tree_root_level(root_backup)); btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); /* * fixme: the total bytes and num_devices need to match or we should * need a fsck */ btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); return 0; } /* helper to cleanup workers */ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) { btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_raid56_workers); btrfs_stop_workers(&fs_info->rmw_workers); btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); 
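	/* the remaining pools are stopped below; ordering is assumed not to
	 * matter here since callers have already quiesced the filesystem */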
btrfs_stop_workers(&fs_info->delayed_workers); btrfs_stop_workers(&fs_info->caching_workers); btrfs_stop_workers(&fs_info->readahead_workers); btrfs_stop_workers(&fs_info->flush_workers); btrfs_stop_workers(&fs_info->qgroup_rescan_workers); } /* helper to cleanup tree roots */ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root) { free_extent_buffer(info->tree_root->node); free_extent_buffer(info->tree_root->commit_root); info->tree_root->node = NULL; info->tree_root->commit_root = NULL; if (info->dev_root) { free_extent_buffer(info->dev_root->node); free_extent_buffer(info->dev_root->commit_root); info->dev_root->node = NULL; info->dev_root->commit_root = NULL; } if (info->extent_root) { free_extent_buffer(info->extent_root->node); free_extent_buffer(info->extent_root->commit_root); info->extent_root->node = NULL; info->extent_root->commit_root = NULL; } if (info->csum_root) { free_extent_buffer(info->csum_root->node); free_extent_buffer(info->csum_root->commit_root); info->csum_root->node = NULL; info->csum_root->commit_root = NULL; } if (info->quota_root) { free_extent_buffer(info->quota_root->node); free_extent_buffer(info->quota_root->commit_root); info->quota_root->node = NULL; info->quota_root->commit_root = NULL; } if (chunk_root) { free_extent_buffer(info->chunk_root->node); free_extent_buffer(info->chunk_root->commit_root); info->chunk_root->node = NULL; info->chunk_root->commit_root = NULL; } } static void del_fs_roots(struct btrfs_fs_info *fs_info) { int ret; struct btrfs_root *gang[8]; int i; while (!list_empty(&fs_info->dead_roots)) { gang[0] = list_entry(fs_info->dead_roots.next, struct btrfs_root, root_list); list_del(&gang[0]->root_list); if (gang[0]->in_radix) { btrfs_free_fs_root(fs_info, gang[0]); } else { free_extent_buffer(gang[0]->node); free_extent_buffer(gang[0]->commit_root); kfree(gang[0]); } } while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang)); if (!ret) break; for (i = 0; i < ret; i++) btrfs_free_fs_root(fs_info, gang[i]); } } int open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices, char *options) { u32 sectorsize; u32 nodesize; u32 leafsize; u32 blocksize; u32 stripesize; u64 generation; u64 features; struct btrfs_key location; struct buffer_head *bh; struct btrfs_super_block *disk_super; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *tree_root; struct btrfs_root *extent_root; struct btrfs_root *csum_root; struct btrfs_root *chunk_root; struct btrfs_root *dev_root; struct btrfs_root *quota_root; struct btrfs_root *log_tree_root; int ret; int err = -EINVAL; int num_backups_tried = 0; int backup_index = 0; tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info); extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info); csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info); chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info); dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info); quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info); if (!tree_root || !extent_root || !csum_root || !chunk_root || !dev_root || !quota_root) { err = -ENOMEM; goto fail; } ret = init_srcu_struct(&fs_info->subvol_srcu); if (ret) { err = ret; goto fail; } ret = setup_bdi(fs_info, &fs_info->bdi); if (ret) { err = ret; goto fail_srcu; } ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0); if (ret) { err = ret; goto fail_bdi; } fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * (1 + ilog2(nr_cpu_ids)); ret = 
percpu_counter_init(&fs_info->delalloc_bytes, 0); if (ret) { err = ret; goto fail_dirty_metadata_bytes; } fs_info->btree_inode = new_inode(sb); if (!fs_info->btree_inode) { err = -ENOMEM; goto fail_delalloc_bytes; } mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->delayed_iputs); INIT_LIST_HEAD(&fs_info->delalloc_inodes); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_lock); spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); spin_lock_init(&fs_info->free_chunk_lock); spin_lock_init(&fs_info->tree_mod_seq_lock); spin_lock_init(&fs_info->super_lock); rwlock_init(&fs_info->tree_mod_log_lock); mutex_init(&fs_info->reloc_mutex); seqlock_init(&fs_info->profiles_lock); init_completion(&fs_info->kobj_unregister); INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); INIT_LIST_HEAD(&fs_info->space_info); INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); btrfs_mapping_init(&fs_info->mapping_tree); btrfs_init_block_rsv(&fs_info->global_block_rsv, BTRFS_BLOCK_RSV_GLOBAL); btrfs_init_block_rsv(&fs_info->delalloc_block_rsv, BTRFS_BLOCK_RSV_DELALLOC); btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); btrfs_init_block_rsv(&fs_info->delayed_block_rsv, BTRFS_BLOCK_RSV_DELOPS); atomic_set(&fs_info->nr_async_submits, 0); atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->defrag_running, 0); atomic64_set(&fs_info->tree_mod_seq, 0); fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; fs_info->defrag_inodes = RB_ROOT; fs_info->trans_no_join = 0; fs_info->free_chunk_space = 0; fs_info->tree_mod_log = RB_ROOT; /* readahead state */ INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); spin_lock_init(&fs_info->reada_lock); fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), GFP_NOFS); if (!fs_info->delayed_root) { err = -ENOMEM; goto fail_iput; } btrfs_init_delayed_root(fs_info->delayed_root); mutex_init(&fs_info->scrub_lock); atomic_set(&fs_info->scrubs_running, 0); atomic_set(&fs_info->scrub_pause_req, 0); atomic_set(&fs_info->scrubs_paused, 0); atomic_set(&fs_info->scrub_cancel_req, 0); init_waitqueue_head(&fs_info->scrub_pause_wait); init_rwsem(&fs_info->scrub_super_lock); fs_info->scrub_workers_refcnt = 0; #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY fs_info->check_integrity_print_mask = 0; #endif spin_lock_init(&fs_info->balance_lock); mutex_init(&fs_info->balance_mutex); atomic_set(&fs_info->balance_running, 0); atomic_set(&fs_info->balance_pause_req, 0); atomic_set(&fs_info->balance_cancel_req, 0); fs_info->balance_ctl = NULL; init_waitqueue_head(&fs_info->balance_wait_q); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); sb->s_bdi = &fs_info->bdi; fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID; set_nlink(fs_info->btree_inode, 1); /* * we set the i_size on the btree inode to the max possible 
int. * the real end of the address space is determined by all of * the devices in the system */ fs_info->btree_inode->i_size = OFFSET_MAX; fs_info->btree_inode->i_mapping->a_ops = &btree_aops; fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi; RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, fs_info->btree_inode->i_mapping); BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0; extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; BTRFS_I(fs_info->btree_inode)->root = tree_root; memset(&BTRFS_I(fs_info->btree_inode)->location, 0, sizeof(struct btrfs_key)); set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(fs_info->btree_inode)->runtime_flags); insert_inode_hash(fs_info->btree_inode); spin_lock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT; fs_info->first_logical_byte = (u64)-1; extent_io_tree_init(&fs_info->freed_extents[0], fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], fs_info->btree_inode->i_mapping); fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->do_barriers = 1; mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->volume_mutex); init_rwsem(&fs_info->extent_commit_sem); init_rwsem(&fs_info->cleanup_work_sem); init_rwsem(&fs_info->subvol_sem); fs_info->dev_replace.lock_owner = 0; atomic_set(&fs_info->dev_replace.nesting_level, 0); mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); mutex_init(&fs_info->dev_replace.lock_management_lock); mutex_init(&fs_info->dev_replace.lock); spin_lock_init(&fs_info->qgroup_lock); mutex_init(&fs_info->qgroup_ioctl_lock); fs_info->qgroup_tree = RB_ROOT; INIT_LIST_HEAD(&fs_info->dirty_qgroups); fs_info->qgroup_seq = 1; fs_info->quota_enabled = 0; fs_info->pending_quota_state = 0; mutex_init(&fs_info->qgroup_rescan_lock); btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); btrfs_init_free_cluster(&fs_info->data_alloc_cluster); init_waitqueue_head(&fs_info->transaction_throttle); init_waitqueue_head(&fs_info->transaction_wait); init_waitqueue_head(&fs_info->transaction_blocked_wait); init_waitqueue_head(&fs_info->async_submit_wait); ret = btrfs_alloc_stripe_hash_table(fs_info); if (ret) { err = ret; goto fail_alloc; } __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); invalidate_bdev(fs_devices->latest_bdev); /* * Read super block and check the signature bytes only */ bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; goto fail_alloc; } /* * We want to check superblock checksum, the type is stored inside. * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 
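 * (the first BTRFS_CSUM_SIZE bytes hold the checksum itself and are
 * excluded from the data being checksummed)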
*/ if (btrfs_check_super_csum(bh->b_data)) { printk(KERN_ERR "btrfs: superblock checksum mismatch\n"); err = -EINVAL; goto fail_alloc; } /* * super_copy is zeroed at allocation time and we never touch the * following bytes up to INFO_SIZE, the checksum is calculated from * the whole block of INFO_SIZE */ memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy)); memcpy(fs_info->super_for_commit, fs_info->super_copy, sizeof(*fs_info->super_for_commit)); brelse(bh); memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE); ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); if (ret) { printk(KERN_ERR "btrfs: superblock contains fatal errors\n"); err = -EINVAL; goto fail_alloc; } disk_super = fs_info->super_copy; if (!btrfs_super_root(disk_super)) goto fail_alloc; /* check FS state, whether FS is broken. */ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); /* * run through our array of backup supers and setup * our ring pointer to the oldest one */ generation = btrfs_super_generation(disk_super); find_oldest_super_backup(fs_info, generation); /* * In the long term, we'll store the compression type in the super * block, and it'll be used for per file compression control. */ fs_info->compress_type = BTRFS_COMPRESS_ZLIB; ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { printk(KERN_ERR "BTRFS: couldn't mount because of " "unsupported optional features (%Lx).\n", (unsigned long long)features); err = -EINVAL; goto fail_alloc; } if (btrfs_super_leafsize(disk_super) != btrfs_super_nodesize(disk_super)) { printk(KERN_ERR "BTRFS: couldn't mount because metadata " "blocksizes don't match. node %d leaf %d\n", btrfs_super_nodesize(disk_super), btrfs_super_leafsize(disk_super)); err = -EINVAL; goto fail_alloc; } if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) { printk(KERN_ERR "BTRFS: couldn't mount because metadata " "blocksize (%d) was too large\n", btrfs_super_leafsize(disk_super)); err = -EINVAL; goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO) features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) printk(KERN_ERR "btrfs: has skinny extents\n"); /* * flag our filesystem as having big metadata blocks if * they are bigger than the page size */ if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) { if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) printk(KERN_INFO "btrfs flagging fs with big metadata feature\n"); features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; } nodesize = btrfs_super_nodesize(disk_super); leafsize = btrfs_super_leafsize(disk_super); sectorsize = btrfs_super_sectorsize(disk_super); stripesize = btrfs_super_stripesize(disk_super); fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids)); fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); /* * mixed block groups end up with duplicate but slightly offset * extent buffers for the same range. 
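 * (this can happen once sectorsize and leafsize differ, because buffer
 * boundaries no longer line up)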
It leads to corruptions */ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && (sectorsize != leafsize)) { printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes " "are not allowed for mixed block groups on %s\n", sb->s_id); goto fail_alloc; } /* * Needn't use the lock because there is no other task which will * update the flag. */ btrfs_set_super_incompat_flags(disk_super, features); features = btrfs_super_compat_ro_flags(disk_super) & ~BTRFS_FEATURE_COMPAT_RO_SUPP; if (!(sb->s_flags & MS_RDONLY) && features) { printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " "unsupported option features (%Lx).\n", (unsigned long long)features); err = -EINVAL; goto fail_alloc; } btrfs_init_workers(&fs_info->generic_worker, "genwork", 1, NULL); btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->flush_workers, "flush_delalloc", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size), &fs_info->generic_worker); btrfs_init_workers(&fs_info->caching_workers, "cache", 2, &fs_info->generic_worker); /* a higher idle thresh on the submit workers makes it much more * likely that bios will be send down in a sane order to the * devices */ fs_info->submit_workers.idle_thresh = 64; fs_info->workers.idle_thresh = 16; fs_info->workers.ordered = 1; fs_info->delalloc_workers.idle_thresh = 2; fs_info->delalloc_workers.ordered = 1; btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_meta_write_workers, "endio-meta-write", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_raid56_workers, "endio-raid56", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->rmw_workers, "rmw", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1, &fs_info->generic_worker); btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->readahead_workers, "readahead", fs_info->thread_pool_size, &fs_info->generic_worker); btrfs_init_workers(&fs_info->qgroup_rescan_workers, "qgroup-rescan", 1, &fs_info->generic_worker); /* * endios are largely parallel and should have a very * low idle thresh */ fs_info->endio_workers.idle_thresh = 4; fs_info->endio_meta_workers.idle_thresh = 4; fs_info->endio_raid56_workers.idle_thresh = 4; fs_info->rmw_workers.idle_thresh = 2; fs_info->endio_write_workers.idle_thresh = 2; fs_info->endio_meta_write_workers.idle_thresh = 2; fs_info->readahead_workers.idle_thresh = 2; /* * btrfs_start_workers can really only fail because of ENOMEM so just * return -ENOMEM if any of these fail. 
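 * OR-ing the individual return values together below is safe because any
 * non-zero result is collapsed into the same -ENOMEM.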
*/ ret = btrfs_start_workers(&fs_info->workers); ret |= btrfs_start_workers(&fs_info->generic_worker); ret |= btrfs_start_workers(&fs_info->submit_workers); ret |= btrfs_start_workers(&fs_info->delalloc_workers); ret |= btrfs_start_workers(&fs_info->fixup_workers); ret |= btrfs_start_workers(&fs_info->endio_workers); ret |= btrfs_start_workers(&fs_info->endio_meta_workers); ret |= btrfs_start_workers(&fs_info->rmw_workers); ret |= btrfs_start_workers(&fs_info->endio_raid56_workers); ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers); ret |= btrfs_start_workers(&fs_info->endio_write_workers); ret |= btrfs_start_workers(&fs_info->endio_freespace_worker); ret |= btrfs_start_workers(&fs_info->delayed_workers); ret |= btrfs_start_workers(&fs_info->caching_workers); ret |= btrfs_start_workers(&fs_info->readahead_workers); ret |= btrfs_start_workers(&fs_info->flush_workers); ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers); if (ret) { err = -ENOMEM; goto fail_sb_buffer; } fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 4 * 1024 * 1024 / PAGE_CACHE_SIZE); tree_root->nodesize = nodesize; tree_root->leafsize = leafsize; tree_root->sectorsize = sectorsize; tree_root->stripesize = stripesize; sb->s_blocksize = sectorsize; sb->s_blocksize_bits = blksize_bits(sectorsize); if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) { printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } if (sectorsize != PAGE_SIZE) { printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) " "found on %s\n", (unsigned long)sectorsize, sb->s_id); goto fail_sb_buffer; } mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { printk(KERN_WARNING "btrfs: failed to read the system " "array on %s\n", sb->s_id); goto fail_sb_buffer; } blocksize = btrfs_level_size(tree_root, btrfs_super_chunk_root_level(disk_super)); generation = btrfs_super_chunk_root_generation(disk_super); __setup_root(nodesize, leafsize, sectorsize, stripesize, chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); chunk_root->node = read_tree_block(chunk_root, btrfs_super_chunk_root(disk_super), blocksize, generation); if (!chunk_root->node || !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", sb->s_id); goto fail_tree_roots; } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); chunk_root->commit_root = btrfs_root_node(chunk_root); read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); ret = btrfs_read_chunk_tree(chunk_root); if (ret) { printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n", sb->s_id); goto fail_tree_roots; } /* * keep the device that is marked to be the target device for the * dev_replace procedure */ btrfs_close_extra_devices(fs_info, fs_devices, 0); if (!fs_devices->latest_bdev) { printk(KERN_CRIT "btrfs: failed to read devices on %s\n", sb->s_id); goto fail_tree_roots; } retry_root_backup: blocksize = btrfs_level_size(tree_root, btrfs_super_root_level(disk_super)); generation = btrfs_super_generation(disk_super); tree_root->node = read_tree_block(tree_root, btrfs_super_root(disk_super), blocksize, generation); if (!tree_root->node || !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) { printk(KERN_WARNING "btrfs: failed to read tree root on %s\n", sb->s_id); goto 
recovery_tree_root; } btrfs_set_root_node(&tree_root->root_item, tree_root->node); tree_root->commit_root = btrfs_root_node(tree_root); ret = find_and_setup_root(tree_root, fs_info, BTRFS_EXTENT_TREE_OBJECTID, extent_root); if (ret) goto recovery_tree_root; extent_root->track_dirty = 1; ret = find_and_setup_root(tree_root, fs_info, BTRFS_DEV_TREE_OBJECTID, dev_root); if (ret) goto recovery_tree_root; dev_root->track_dirty = 1; ret = find_and_setup_root(tree_root, fs_info, BTRFS_CSUM_TREE_OBJECTID, csum_root); if (ret) goto recovery_tree_root; csum_root->track_dirty = 1; ret = find_and_setup_root(tree_root, fs_info, BTRFS_QUOTA_TREE_OBJECTID, quota_root); if (ret) { kfree(quota_root); quota_root = fs_info->quota_root = NULL; } else { quota_root->track_dirty = 1; fs_info->quota_enabled = 1; fs_info->pending_quota_state = 1; } fs_info->generation = generation; fs_info->last_trans_committed = generation; ret = btrfs_recover_balance(fs_info); if (ret) { printk(KERN_WARNING "btrfs: failed to recover balance\n"); goto fail_block_groups; } ret = btrfs_init_dev_stats(fs_info); if (ret) { printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n", ret); goto fail_block_groups; } ret = btrfs_init_dev_replace(fs_info); if (ret) { pr_err("btrfs: failed to init dev_replace: %d\n", ret); goto fail_block_groups; } btrfs_close_extra_devices(fs_info, fs_devices, 1); ret = btrfs_init_space_info(fs_info); if (ret) { printk(KERN_ERR "Failed to initial space info: %d\n", ret); goto fail_block_groups; } ret = btrfs_read_block_groups(extent_root); if (ret) { printk(KERN_ERR "Failed to read block groups: %d\n", ret); goto fail_block_groups; } fs_info->num_tolerated_disk_barrier_failures = btrfs_calc_num_tolerated_disk_barrier_failures(fs_info); if (fs_info->fs_devices->missing_devices > fs_info->num_tolerated_disk_barrier_failures && !(sb->s_flags & MS_RDONLY)) { printk(KERN_WARNING "Btrfs: too many missing devices, writeable mount is not allowed\n"); goto fail_block_groups; } fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (IS_ERR(fs_info->cleaner_kthread)) goto fail_block_groups; fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, "btrfs-transaction"); if (IS_ERR(fs_info->transaction_kthread)) goto fail_cleaner; if (!btrfs_test_opt(tree_root, SSD) && !btrfs_test_opt(tree_root, NOSSD) && !fs_info->fs_devices->rotating) { printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD " "mode\n"); btrfs_set_opt(fs_info->mount_opt, SSD); } #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) { ret = btrfsic_mount(tree_root, fs_devices, btrfs_test_opt(tree_root, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ? 
1 : 0, fs_info->check_integrity_print_mask); if (ret) printk(KERN_WARNING "btrfs: failed to initialize" " integrity check module %s\n", sb->s_id); } #endif ret = btrfs_read_qgroup_config(fs_info); if (ret) goto fail_trans_kthread; /* do not make disk changes in broken FS */ if (btrfs_super_log_root(disk_super) != 0) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { printk(KERN_WARNING "Btrfs log replay required " "on RO media\n"); err = -EIO; goto fail_qgroup; } blocksize = btrfs_level_size(tree_root, btrfs_super_log_root_level(disk_super)); log_tree_root = btrfs_alloc_root(fs_info); if (!log_tree_root) { err = -ENOMEM; goto fail_qgroup; } __setup_root(nodesize, leafsize, sectorsize, stripesize, log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); log_tree_root->node = read_tree_block(tree_root, bytenr, blocksize, generation + 1); if (!log_tree_root->node || !extent_buffer_uptodate(log_tree_root->node)) { printk(KERN_ERR "btrfs: failed to read log tree\n"); free_extent_buffer(log_tree_root->node); kfree(log_tree_root); goto fail_trans_kthread; } /* returns with log_tree_root freed on success */ ret = btrfs_recover_log_trees(log_tree_root); if (ret) { btrfs_error(tree_root->fs_info, ret, "Failed to recover log tree"); free_extent_buffer(log_tree_root->node); kfree(log_tree_root); goto fail_trans_kthread; } if (sb->s_flags & MS_RDONLY) { ret = btrfs_commit_super(tree_root); if (ret) goto fail_trans_kthread; } } ret = btrfs_find_orphan_roots(tree_root); if (ret) goto fail_trans_kthread; if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_cleanup_fs_roots(fs_info); if (ret) goto fail_trans_kthread; ret = btrfs_recover_relocation(tree_root); if (ret < 0) { printk(KERN_WARNING "btrfs: failed to recover relocation\n"); err = -EINVAL; goto fail_qgroup; } } location.objectid = BTRFS_FS_TREE_OBJECTID; location.type = BTRFS_ROOT_ITEM_KEY; location.offset = (u64)-1; fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); if (!fs_info->fs_root) goto fail_qgroup; if (IS_ERR(fs_info->fs_root)) { err = PTR_ERR(fs_info->fs_root); goto fail_qgroup; } if (sb->s_flags & MS_RDONLY) return 0; down_read(&fs_info->cleanup_work_sem); if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { up_read(&fs_info->cleanup_work_sem); close_ctree(tree_root); return ret; } up_read(&fs_info->cleanup_work_sem); ret = btrfs_resume_balance_async(fs_info); if (ret) { printk(KERN_WARNING "btrfs: failed to resume balance\n"); close_ctree(tree_root); return ret; } ret = btrfs_resume_dev_replace_async(fs_info); if (ret) { pr_warn("btrfs: failed to resume dev_replace\n"); close_ctree(tree_root); return ret; } return 0; fail_qgroup: btrfs_free_qgroup_config(fs_info); fail_trans_kthread: kthread_stop(fs_info->transaction_kthread); btrfs_cleanup_transaction(fs_info->tree_root); del_fs_roots(fs_info); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); /* * make sure we're done with the btree inode before we stop our * kthreads */ filemap_write_and_wait(fs_info->btree_inode->i_mapping); fail_block_groups: btrfs_put_block_group_cache(fs_info); btrfs_free_block_groups(fs_info); fail_tree_roots: free_root_pointers(fs_info, 1); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_sb_buffer: btrfs_stop_all_workers(fs_info); fail_alloc: fail_iput: btrfs_mapping_tree_free(&fs_info->mapping_tree); iput(fs_info->btree_inode); fail_delalloc_bytes: percpu_counter_destroy(&fs_info->delalloc_bytes); fail_dirty_metadata_bytes: 
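	/* each fail label releases what the corresponding setup step above
	 * allocated, unwinding in reverse order */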
percpu_counter_destroy(&fs_info->dirty_metadata_bytes); fail_bdi: bdi_destroy(&fs_info->bdi); fail_srcu: cleanup_srcu_struct(&fs_info->subvol_srcu); fail: btrfs_free_stripe_hash_table(fs_info); btrfs_close_devices(fs_info->fs_devices); return err; recovery_tree_root: if (!btrfs_test_opt(tree_root, RECOVERY)) goto fail_tree_roots; free_root_pointers(fs_info, 0); /* don't use the log in recovery mode, it won't be valid */ btrfs_set_super_log_root(disk_super, 0); /* we can't trust the free space cache either */ btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE); ret = next_root_backup(fs_info, fs_info->super_copy, &num_backups_tried, &backup_index); if (ret == -1) goto fail_block_groups; goto retry_root_backup; } static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) { if (uptodate) { set_buffer_uptodate(bh); } else { struct btrfs_device *device = (struct btrfs_device *) bh->b_private; printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to " "I/O error on %s\n", rcu_str_deref(device->name)); /* note, we dont' set_buffer_write_io_error because we have * our own ways of dealing with the IO errors */ clear_buffer_uptodate(bh); btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); } unlock_buffer(bh); put_bh(bh); } struct buffer_head *btrfs_read_dev_super(struct block_device *bdev) { struct buffer_head *bh; struct buffer_head *latest = NULL; struct btrfs_super_block *super; int i; u64 transid = 0; u64 bytenr; /* we would like to check all the supers, but that would make * a btrfs mount succeed after a mkfs from a different FS. * So, we need to add a special mount option to scan for * later supers, using BTRFS_SUPER_MIRROR_MAX instead */ for (i = 0; i < 1; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + 4096 >= i_size_read(bdev->bd_inode)) break; bh = __bread(bdev, bytenr / 4096, 4096); if (!bh) continue; super = (struct btrfs_super_block *)bh->b_data; if (btrfs_super_bytenr(super) != bytenr || super->magic != cpu_to_le64(BTRFS_MAGIC)) { brelse(bh); continue; } if (!latest || btrfs_super_generation(super) > transid) { brelse(latest); latest = bh; transid = btrfs_super_generation(super); } else { brelse(bh); } } return latest; } /* * this should be called twice, once with wait == 0 and * once with wait == 1. When wait == 0 is done, all the buffer heads * we write are pinned. * * They are released when wait == 1 is done. * max_mirrors must be the same for both runs, and it indicates how * many supers on this one device should be written. * * max_mirrors == 0 means to write them all. 
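 * Mirror copies whose offset would fall past the end of the device are
 * skipped.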
*/ static int write_dev_supers(struct btrfs_device *device, struct btrfs_super_block *sb, int do_barriers, int wait, int max_mirrors) { struct buffer_head *bh; int i; int ret; int errors = 0; u32 crc; u64 bytenr; if (max_mirrors == 0) max_mirrors = BTRFS_SUPER_MIRROR_MAX; for (i = 0; i < max_mirrors; i++) { bytenr = btrfs_sb_offset(i); if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) break; if (wait) { bh = __find_get_block(device->bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE); if (!bh) { errors++; continue; } wait_on_buffer(bh); if (!buffer_uptodate(bh)) errors++; /* drop our reference */ brelse(bh); /* drop the reference from the wait == 0 run */ brelse(bh); continue; } else { btrfs_set_super_bytenr(sb, bytenr); crc = ~(u32)0; crc = btrfs_csum_data((char *)sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); btrfs_csum_final(crc, sb->csum); /* * one reference for us, and we leave it for the * caller */ bh = __getblk(device->bdev, bytenr / 4096, BTRFS_SUPER_INFO_SIZE); if (!bh) { printk(KERN_ERR "btrfs: couldn't get super " "buffer head for bytenr %Lu\n", bytenr); errors++; continue; } memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE); /* one reference for submit_bh */ get_bh(bh); set_buffer_uptodate(bh); lock_buffer(bh); bh->b_end_io = btrfs_end_buffer_write_sync; bh->b_private = device; } /* * we fua the first super. The others we allow * to go down lazy. */ ret = btrfsic_submit_bh(WRITE_FUA, bh); if (ret) errors++; } return errors < i ? 0 : -1; } /* * endio for the write_dev_flush, this will wake anyone waiting * for the barrier when it is done */ static void btrfs_end_empty_barrier(struct bio *bio, int err) { if (err) { if (err == -EOPNOTSUPP) set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); clear_bit(BIO_UPTODATE, &bio->bi_flags); } if (bio->bi_private) complete(bio->bi_private); bio_put(bio); } /* * trigger flushes for one the devices. If you pass wait == 0, the flushes are * sent down. With wait == 1, it waits for the previous flush. 
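 * Callers make one pass with wait == 0 and a second pass with wait == 1,
 * mirroring the submit/wait split used by write_dev_supers().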
* * any device where the flush fails with eopnotsupp are flagged as not-barrier * capable */ static int write_dev_flush(struct btrfs_device *device, int wait) { struct bio *bio; int ret = 0; if (device->nobarriers) return 0; if (wait) { bio = device->flush_bio; if (!bio) return 0; wait_for_completion(&device->flush_wait); if (bio_flagged(bio, BIO_EOPNOTSUPP)) { printk_in_rcu("btrfs: disabling barriers on dev %s\n", rcu_str_deref(device->name)); device->nobarriers = 1; } else if (!bio_flagged(bio, BIO_UPTODATE)) { ret = -EIO; btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS); } /* drop the reference from the wait == 0 run */ bio_put(bio); device->flush_bio = NULL; return ret; } /* * one reference for us, and we leave it for the * caller */ device->flush_bio = NULL; bio = btrfs_io_bio_alloc(GFP_NOFS, 0); if (!bio) return -ENOMEM; bio->bi_end_io = btrfs_end_empty_barrier; bio->bi_bdev = device->bdev; init_completion(&device->flush_wait); bio->bi_private = &device->flush_wait; device->flush_bio = bio; bio_get(bio); btrfsic_submit_bio(WRITE_FLUSH, bio); return 0; } /* * send an empty flush down to each device in parallel, * then wait for them */ static int barrier_all_devices(struct btrfs_fs_info *info) { struct list_head *head; struct btrfs_device *dev; int errors_send = 0; int errors_wait = 0; int ret; /* send down all the barriers */ head = &info->fs_devices->devices; list_for_each_entry_rcu(dev, head, dev_list) { if (dev->missing) continue; if (!dev->bdev) { errors_send++; continue; } if (!dev->in_fs_metadata || !dev->writeable) continue; ret = write_dev_flush(dev, 0); if (ret) errors_send++; } /* wait for all the barriers */ list_for_each_entry_rcu(dev, head, dev_list) { if (dev->missing) continue; if (!dev->bdev) { errors_wait++; continue; } if (!dev->in_fs_metadata || !dev->writeable) continue; ret = write_dev_flush(dev, 1); if (ret) errors_wait++; } if (errors_send > info->num_tolerated_disk_barrier_failures || errors_wait > info->num_tolerated_disk_barrier_failures) return -EIO; return 0; } int btrfs_calc_num_tolerated_disk_barrier_failures( struct btrfs_fs_info *fs_info) { struct btrfs_ioctl_space_info space; struct btrfs_space_info *sinfo; u64 types[] = {BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_SYSTEM, BTRFS_BLOCK_GROUP_METADATA, BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA}; int num_types = 4; int i; int c; int num_tolerated_disk_barrier_failures = (int)fs_info->fs_devices->num_devices; for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; sinfo = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &fs_info->space_info, list) { if (tmp->flags == types[i]) { sinfo = tmp; break; } } rcu_read_unlock(); if (!sinfo) continue; down_read(&sinfo->groups_sem); for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { if (!list_empty(&sinfo->block_groups[c])) { u64 flags; btrfs_get_block_group_info( &sinfo->block_groups[c], &space); if (space.total_bytes == 0 || space.used_bytes == 0) continue; flags = space.flags; /* * return * 0: if dup, single or RAID0 is configured for * any of metadata, system or data, else * 1: if RAID5 is configured, or if RAID1 or * RAID10 is configured and only two mirrors * are used, else * 2: if RAID6 is configured, else * num_mirrors - 1: if RAID1 or RAID10 is * configured and more than * 2 mirrors are used. 
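 * (the loop below starts from the number of devices and narrows this
 * value as more fragile profiles are found)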
 */
				if (num_tolerated_disk_barrier_failures > 0 &&
				    ((flags & (BTRFS_BLOCK_GROUP_DUP |
					       BTRFS_BLOCK_GROUP_RAID0)) ||
				     ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
				      == 0)))
					num_tolerated_disk_barrier_failures = 0;
				else if (num_tolerated_disk_barrier_failures > 1) {
					if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
					    BTRFS_BLOCK_GROUP_RAID5 |
					    BTRFS_BLOCK_GROUP_RAID10)) {
						num_tolerated_disk_barrier_failures = 1;
					} else if (flags &
						   BTRFS_BLOCK_GROUP_RAID6) {
						num_tolerated_disk_barrier_failures = 2;
					}
				}
			}
		}
		up_read(&sinfo->groups_sem);
	}

	return num_tolerated_disk_barrier_failures;
}

static int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;

	if (do_barriers) {
		ret = barrier_all_devices(root->fs_info);
		if (ret) {
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			btrfs_error(root->fs_info, ret,
				    "errors while submitting device barriers.");
			return ret;
		}
	}

	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		/* This shouldn't happen.
FUA is masked off if unsupported */ BUG(); } total_errors = 0; list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) continue; if (!dev->in_fs_metadata || !dev->writeable) continue; ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors); if (ret) total_errors++; } mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { btrfs_error(root->fs_info, -EIO, "%d errors while writing supers", total_errors); return -EIO; } return 0; } int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors) { int ret; ret = write_all_supers(root, max_mirrors); return ret; } void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) { spin_lock(&fs_info->fs_roots_radix_lock); radix_tree_delete(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid); spin_unlock(&fs_info->fs_roots_radix_lock); if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { btrfs_free_log(NULL, root); btrfs_free_log_root_tree(NULL, fs_info); } __btrfs_remove_free_space_cache(root->free_ino_pinned); __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); } static void free_fs_root(struct btrfs_root *root) { iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_dev) free_anon_bdev(root->anon_dev); free_extent_buffer(root->node); free_extent_buffer(root->commit_root); kfree(root->free_ino_ctl); kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) { u64 root_objectid = 0; struct btrfs_root *gang[8]; int i; int ret; while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, root_objectid, ARRAY_SIZE(gang)); if (!ret) break; root_objectid = gang[ret - 1]->root_key.objectid + 1; for (i = 0; i < ret; i++) { int err; root_objectid = gang[i]->root_key.objectid; err = btrfs_orphan_cleanup(gang[i]); if (err) return err; } root_objectid++; } return 0; } int btrfs_commit_super(struct btrfs_root *root) { struct btrfs_trans_handle *trans; int ret; mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); mutex_unlock(&root->fs_info->cleaner_mutex); wake_up_process(root->fs_info->cleaner_kthread); /* wait until ongoing cleanup work done */ down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); if (ret) return ret; /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); if (ret) return ret; ret = btrfs_write_and_wait_transaction(NULL, root); if (ret) { btrfs_error(root->fs_info, ret, "Failed to sync btree inode to disk."); return ret; } ret = write_ctree_super(NULL, root, 0); return ret; } int close_ctree(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; int ret; fs_info->closing = 1; smp_mb(); /* pause restriper - we want to resume on mount */ btrfs_pause_balance(fs_info); btrfs_dev_replace_suspend_for_unmount(fs_info); btrfs_scrub_cancel(fs_info); /* wait for any defraggers to finish */ wait_event(fs_info->transaction_wait, (atomic_read(&fs_info->defrag_running) == 0)); /* clear out the rbtree of defraggable inodes */ btrfs_cleanup_defrag_inodes(fs_info); if (!(fs_info->sb->s_flags & MS_RDONLY)) { 
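		/* a read-write mount gets one last commit so the on-disk
		 * trees are consistent before teardown */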
ret = btrfs_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) btrfs_error_commit_super(root); btrfs_put_block_group_cache(fs_info); kthread_stop(fs_info->transaction_kthread); kthread_stop(fs_info->cleaner_kthread); fs_info->closing = 2; smp_mb(); btrfs_free_qgroup_config(root->fs_info); if (percpu_counter_sum(&fs_info->delalloc_bytes)) { printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n", percpu_counter_sum(&fs_info->delalloc_bytes)); } btrfs_free_block_groups(fs_info); /* * we must make sure there is not any read request to * submit after we stopping all workers. */ invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); del_fs_roots(fs_info); free_root_pointers(fs_info, 1); iput(fs_info->btree_inode); #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY if (btrfs_test_opt(root, CHECK_INTEGRITY)) btrfsic_unmount(root, fs_info->fs_devices); #endif btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); percpu_counter_destroy(&fs_info->dirty_metadata_bytes); percpu_counter_destroy(&fs_info->delalloc_bytes); bdi_destroy(&fs_info->bdi); cleanup_srcu_struct(&fs_info->subvol_srcu); btrfs_free_stripe_hash_table(fs_info); return 0; } int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, int atomic) { int ret; struct inode *btree_inode = buf->pages[0]->mapping->host; ret = extent_buffer_uptodate(buf); if (!ret) return ret; ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, parent_transid, atomic); if (ret == -EAGAIN) return ret; return !ret; } int btrfs_set_buffer_uptodate(struct extent_buffer *buf) { return set_extent_buffer_uptodate(buf); } void btrfs_mark_buffer_dirty(struct extent_buffer *buf) { struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; u64 transid = btrfs_header_generation(buf); int was_dirty; btrfs_assert_tree_locked(buf); if (transid != root->fs_info->generation) WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, " "found %llu running %llu\n", (unsigned long long)buf->start, (unsigned long long)transid, (unsigned long long)root->fs_info->generation); was_dirty = set_extent_buffer_dirty(buf); if (!was_dirty) __percpu_counter_add(&root->fs_info->dirty_metadata_bytes, buf->len, root->fs_info->dirty_metadata_batch); } static void __btrfs_btree_balance_dirty(struct btrfs_root *root, int flush_delayed) { /* * looks as though older kernels can get into trouble with * this code, they end up stuck in balance_dirty_pages forever */ int ret; if (current->flags & PF_MEMALLOC) return; if (flush_delayed) btrfs_balance_delayed_items(root); ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes, BTRFS_DIRTY_METADATA_THRESH); if (ret > 0) { balance_dirty_pages_ratelimited( root->fs_info->btree_inode->i_mapping); } return; } void btrfs_btree_balance_dirty(struct btrfs_root *root) { __btrfs_btree_balance_dirty(root, 1); } void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root) { __btrfs_btree_balance_dirty(root, 0); } int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) { struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root; return btree_read_extent_buffer_pages(root, buf, 0, parent_transid); } static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, int read_only) { /* * Placeholder for checks */ return 0; } static void btrfs_error_commit_super(struct btrfs_root *root) { mutex_lock(&root->fs_info->cleaner_mutex); 
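	/* hold the cleaner mutex so the cleaner kthread cannot race with
	 * this final delayed-iput flush */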
btrfs_run_delayed_iputs(root); mutex_unlock(&root->fs_info->cleaner_mutex); down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); /* cleanup FS via transaction */ btrfs_cleanup_transaction(root); } static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, struct btrfs_root *root) { struct btrfs_inode *btrfs_inode; struct list_head splice; INIT_LIST_HEAD(&splice); mutex_lock(&root->fs_info->ordered_operations_mutex); spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&t->ordered_operations, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, ordered_operations); list_del_init(&btrfs_inode->ordered_operations); spin_unlock(&root->fs_info->ordered_extent_lock); btrfs_invalidate_inodes(btrfs_inode->root); spin_lock(&root->fs_info->ordered_extent_lock); } spin_unlock(&root->fs_info->ordered_extent_lock); mutex_unlock(&root->fs_info->ordered_operations_mutex); } static void btrfs_destroy_ordered_extents(struct btrfs_root *root) { struct btrfs_ordered_extent *ordered; spin_lock(&root->fs_info->ordered_extent_lock); /* * This will just short circuit the ordered completion stuff which will * make sure the ordered extent gets properly cleaned up. */ list_for_each_entry(ordered, &root->fs_info->ordered_extents, root_extent_list) set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); spin_unlock(&root->fs_info->ordered_extent_lock); } int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_root *root) { struct rb_node *node; struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_node *ref; int ret = 0; delayed_refs = &trans->delayed_refs; spin_lock(&delayed_refs->lock); if (delayed_refs->num_entries == 0) { spin_unlock(&delayed_refs->lock); printk(KERN_INFO "delayed_refs has NO entry\n"); return ret; } while ((node = rb_first(&delayed_refs->root)) != NULL) { struct btrfs_delayed_ref_head *head = NULL; ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); atomic_set(&ref->refs, 1); if (btrfs_delayed_ref_is_head(ref)) { head = btrfs_delayed_node_to_head(ref); if (!mutex_trylock(&head->mutex)) { atomic_inc(&ref->refs); spin_unlock(&delayed_refs->lock); /* Need to wait for the delayed ref to run */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(ref); spin_lock(&delayed_refs->lock); continue; } if (head->must_insert_reserved) btrfs_pin_extent(root, ref->bytenr, ref->num_bytes, 1); btrfs_free_delayed_extent_op(head->extent_op); delayed_refs->num_heads--; if (list_empty(&head->cluster)) delayed_refs->num_heads_ready--; list_del_init(&head->cluster); } ref->in_tree = 0; rb_erase(&ref->rb_node, &delayed_refs->root); delayed_refs->num_entries--; if (head) mutex_unlock(&head->mutex); spin_unlock(&delayed_refs->lock); btrfs_put_delayed_ref(ref); cond_resched(); spin_lock(&delayed_refs->lock); } spin_unlock(&delayed_refs->lock); return ret; } static void btrfs_evict_pending_snapshots(struct btrfs_transaction *t) { struct btrfs_pending_snapshot *snapshot; struct list_head splice; INIT_LIST_HEAD(&splice); list_splice_init(&t->pending_snapshots, &splice); while (!list_empty(&splice)) { snapshot = list_entry(splice.next, struct btrfs_pending_snapshot, list); snapshot->error = -ECANCELED; list_del_init(&snapshot->list); } } static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) { struct btrfs_inode *btrfs_inode; struct list_head splice; INIT_LIST_HEAD(&splice); spin_lock(&root->fs_info->delalloc_lock); 
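	/* splice the whole list off under the lock; it is walked below with
	 * the lock dropped around each invalidate */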
list_splice_init(&root->fs_info->delalloc_inodes, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, delalloc_inodes); list_del_init(&btrfs_inode->delalloc_inodes); clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, &btrfs_inode->runtime_flags); spin_unlock(&root->fs_info->delalloc_lock); btrfs_invalidate_inodes(btrfs_inode->root); spin_lock(&root->fs_info->delalloc_lock); } spin_unlock(&root->fs_info->delalloc_lock); } static int btrfs_destroy_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark) { int ret; struct extent_buffer *eb; u64 start = 0; u64 end; while (1) { ret = find_first_extent_bit(dirty_pages, start, &start, &end, mark, NULL); if (ret) break; clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); while (start <= end) { eb = btrfs_find_tree_block(root, start, root->leafsize); start += root->leafsize; if (!eb) continue; wait_on_extent_buffer_writeback(eb); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) clear_extent_buffer_dirty(eb); free_extent_buffer_stale(eb); } } return ret; } static int btrfs_destroy_pinned_extent(struct btrfs_root *root, struct extent_io_tree *pinned_extents) { struct extent_io_tree *unpin; u64 start; u64 end; int ret; bool loop = true; unpin = pinned_extents; again: while (1) { ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, NULL); if (ret) break; clear_extent_dirty(unpin, start, end, GFP_NOFS); btrfs_error_unpin_extent_range(root, start, end); cond_resched(); } if (loop) { if (unpin == &root->fs_info->freed_extents[0]) unpin = &root->fs_info->freed_extents[1]; else unpin = &root->fs_info->freed_extents[0]; loop = false; goto again; } return 0; } void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, struct btrfs_root *root) { btrfs_destroy_delayed_refs(cur_trans, root); btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv, cur_trans->dirty_pages.dirty_bytes); /* FIXME: cleanup wait for commit */ cur_trans->in_commit = 1; cur_trans->blocked = 1; wake_up(&root->fs_info->transaction_blocked_wait); btrfs_evict_pending_snapshots(cur_trans); cur_trans->blocked = 0; wake_up(&root->fs_info->transaction_wait); cur_trans->commit_done = 1; wake_up(&cur_trans->commit_wait); btrfs_destroy_delayed_inodes(root); btrfs_assert_delayed_root_empty(root); btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, EXTENT_DIRTY); btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); /* memset(cur_trans, 0, sizeof(*cur_trans)); kmem_cache_free(btrfs_transaction_cachep, cur_trans); */ } static int btrfs_cleanup_transaction(struct btrfs_root *root) { struct btrfs_transaction *t; LIST_HEAD(list); mutex_lock(&root->fs_info->transaction_kthread_mutex); spin_lock(&root->fs_info->trans_lock); list_splice_init(&root->fs_info->trans_list, &list); root->fs_info->trans_no_join = 1; spin_unlock(&root->fs_info->trans_lock); while (!list_empty(&list)) { t = list_entry(list.next, struct btrfs_transaction, list); btrfs_destroy_ordered_operations(t, root); btrfs_destroy_ordered_extents(root); btrfs_destroy_delayed_refs(t, root); /* FIXME: cleanup wait for commit */ t->in_commit = 1; t->blocked = 1; smp_mb(); if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) wake_up(&root->fs_info->transaction_blocked_wait); btrfs_evict_pending_snapshots(t); t->blocked = 0; smp_mb(); if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); t->commit_done = 1; smp_mb(); if (waitqueue_active(&t->commit_wait)) 
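			/* presumably releases anyone blocked in wait_for_commit() */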
wake_up(&t->commit_wait); btrfs_destroy_delayed_inodes(root); btrfs_assert_delayed_root_empty(root); btrfs_destroy_delalloc_inodes(root); spin_lock(&root->fs_info->trans_lock); root->fs_info->running_transaction = NULL; spin_unlock(&root->fs_info->trans_lock); btrfs_destroy_marked_extents(root, &t->dirty_pages, EXTENT_DIRTY); btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); atomic_set(&t->use_count, 0); list_del_init(&t->list); memset(t, 0, sizeof(*t)); kmem_cache_free(btrfs_transaction_cachep, t); } spin_lock(&root->fs_info->trans_lock); root->fs_info->trans_no_join = 0; spin_unlock(&root->fs_info->trans_lock); mutex_unlock(&root->fs_info->transaction_kthread_mutex); return 0; } static struct extent_io_ops btree_extent_io_ops = { .readpage_end_io_hook = btree_readpage_end_io_hook, .readpage_io_failed_hook = btree_io_failed_hook, .submit_bio_hook = btree_submit_bio_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, };
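/*
 * Editor's note: an illustrative, standalone sketch (not part of the btrfs
 * source above) of the splice-and-drain pattern used by
 * btrfs_destroy_ordered_operations() and btrfs_destroy_delalloc_inodes():
 * the shared list is cut over to a private head in O(1) while the lock is
 * held, then drained with the lock dropped so per-entry work may block.
 * All names below (struct item, shared_lock, produce, drain_all) are
 * hypothetical stand-ins using pthreads instead of kernel primitives.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int payload;
};

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *shared_list;	/* producers prepend under shared_lock */

static void produce(int payload)
{
	struct item *it = malloc(sizeof(*it));

	it->payload = payload;
	pthread_mutex_lock(&shared_lock);
	it->next = shared_list;
	shared_list = it;
	pthread_mutex_unlock(&shared_lock);
}

static void drain_all(void)
{
	struct item *splice;

	/* Cut the whole list over to a private head while locked. */
	pthread_mutex_lock(&shared_lock);
	splice = shared_list;
	shared_list = NULL;
	pthread_mutex_unlock(&shared_lock);

	/* Drain privately; per-item cleanup may now block safely. */
	while (splice) {
		struct item *it = splice;

		splice = it->next;
		printf("cleaning up item %d\n", it->payload);
		free(it);
	}
}

int main(void)
{
	produce(1);
	produce(2);
	drain_all();
	return 0;
}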
gpl-2.0
mtb3000gt/Deathly_Kernel_D2
drivers/staging/speakup/selection.c
775
4490
#include <linux/slab.h> /* for kmalloc */ #include <linux/consolemap.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/selection.h> #include <linux/workqueue.h> #include <asm/cmpxchg.h> #include "speakup.h" /* ------ cut and paste ----- */ /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */ #define ishardspace(c) ((c) == ' ') unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */ /* Variables for selection control. */ /* must not be deallocated */ struct vc_data *spk_sel_cons; /* cleared by clear_selection */ static int sel_start = -1; static int sel_end; static int sel_buffer_lth; static char *sel_buffer; static unsigned char sel_pos(int n) { return inverse_translate(spk_sel_cons, screen_glyph(spk_sel_cons, n), 0); } void speakup_clear_selection(void) { sel_start = -1; } /* does screen address p correspond to character at LH/RH edge of screen? */ static int atedge(const int p, int size_row) { return !(p % size_row) || !((p + 2) % size_row); } /* constrain v such that v <= u */ static unsigned short limit(const unsigned short v, const unsigned short u) { return (v > u) ? u : v; } int speakup_set_selection(struct tty_struct *tty) { int new_sel_start, new_sel_end; char *bp, *obp; int i, ps, pe; struct vc_data *vc = vc_cons[fg_console].d; spk_xs = limit(spk_xs, vc->vc_cols - 1); spk_ys = limit(spk_ys, vc->vc_rows - 1); spk_xe = limit(spk_xe, vc->vc_cols - 1); spk_ye = limit(spk_ye, vc->vc_rows - 1); ps = spk_ys * vc->vc_size_row + (spk_xs << 1); pe = spk_ye * vc->vc_size_row + (spk_xe << 1); if (ps > pe) { /* make sel_start <= sel_end */ int tmp = ps; ps = pe; pe = tmp; } if (spk_sel_cons != vc_cons[fg_console].d) { speakup_clear_selection(); spk_sel_cons = vc_cons[fg_console].d; printk(KERN_WARNING "Selection: mark console not the same as cut\n"); return -EINVAL; } new_sel_start = ps; new_sel_end = pe; /* select to end of line if on trailing space */ if (new_sel_end > new_sel_start && !atedge(new_sel_end, vc->vc_size_row) && ishardspace(sel_pos(new_sel_end))) { for (pe = new_sel_end + 2; ; pe += 2) if (!ishardspace(sel_pos(pe)) || atedge(pe, vc->vc_size_row)) break; if (ishardspace(sel_pos(pe))) new_sel_end = pe; } if ((new_sel_start == sel_start) && (new_sel_end == sel_end)) return 0; /* no action required */ sel_start = new_sel_start; sel_end = new_sel_end; /* Allocate a new buffer before freeing the old one ... */ bp = kmalloc((sel_end-sel_start)/2+1, GFP_ATOMIC); if (!bp) { printk(KERN_WARNING "selection: kmalloc() failed\n"); speakup_clear_selection(); return -ENOMEM; } kfree(sel_buffer); sel_buffer = bp; obp = bp; for (i = sel_start; i <= sel_end; i += 2) { *bp = sel_pos(i); if (!ishardspace(*bp++)) obp = bp; if (!((i + 2) % vc->vc_size_row)) { /* strip trailing blanks from line and add newline, unless non-space at end of line.
*/ if (obp != bp) { bp = obp; *bp++ = '\r'; } obp = bp; } } sel_buffer_lth = bp - sel_buffer; return 0; } struct speakup_paste_work { struct work_struct work; struct tty_struct *tty; }; static void __speakup_paste_selection(struct work_struct *work) { struct speakup_paste_work *spw = container_of(work, struct speakup_paste_work, work); struct tty_struct *tty = xchg(&spw->tty, NULL); struct vc_data *vc = (struct vc_data *) tty->driver_data; int pasted = 0, count; DECLARE_WAITQUEUE(wait, current); add_wait_queue(&vc->paste_wait, &wait); while (sel_buffer && sel_buffer_lth > pasted) { set_current_state(TASK_INTERRUPTIBLE); if (test_bit(TTY_THROTTLED, &tty->flags)) { schedule(); continue; } count = sel_buffer_lth - pasted; count = min_t(int, count, tty->receive_room); tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted, 0, count); pasted += count; } remove_wait_queue(&vc->paste_wait, &wait); current->state = TASK_RUNNING; tty_kref_put(tty); } static struct speakup_paste_work speakup_paste_work = { .work = __WORK_INITIALIZER(speakup_paste_work.work, __speakup_paste_selection) }; int speakup_paste_selection(struct tty_struct *tty) { if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL) return -EBUSY; tty_kref_get(tty); schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work); return 0; } void speakup_cancel_paste(void) { cancel_work_sync(&speakup_paste_work.work); tty_kref_put(speakup_paste_work.tty); }
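/*
 * Editor's note: an illustrative, standalone sketch (not part of the
 * speakup source above) of the single-slot claim that
 * speakup_paste_selection() performs with cmpxchg() and that
 * __speakup_paste_selection() releases with xchg(). Written with C11
 * atomics; busy_slot, try_claim and release are hypothetical names.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) busy_slot;	/* NULL means the worker is idle */

static int try_claim(void *token)
{
	void *expected = NULL;

	/* Succeeds only if the slot is currently free, like the
	 * cmpxchg(&speakup_paste_work.tty, NULL, tty) check above. */
	if (!atomic_compare_exchange_strong(&busy_slot, &expected, token))
		return -1;		/* already busy, cf. -EBUSY */
	return 0;
}

static void *release(void)
{
	/* Hand the slot back and recover the token atomically,
	 * like the xchg(&spw->tty, NULL) in the work handler. */
	return atomic_exchange(&busy_slot, NULL);
}

int main(void)
{
	int token;

	printf("first claim:   %d\n", try_claim(&token));	/* 0 */
	printf("second claim:  %d\n", try_claim(&token));	/* -1 */
	release();
	printf("after release: %d\n", try_claim(&token));	/* 0 */
	return 0;
}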
gpl-2.0
chilisom/linux-stable
drivers/media/pci/solo6x10/solo6x10-tw28.c
1287
26415
/* * Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com> * * Original author: * Ben Collins <bcollins@ubuntu.com> * * Additional work by: * John Brooks <john.brooks@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/delay.h> #include "solo6x10.h" #include "solo6x10-tw28.h" #define DEFAULT_HDELAY_NTSC (32 - 8) #define DEFAULT_HACTIVE_NTSC (720 + 16) #define DEFAULT_VDELAY_NTSC (7 - 2) #define DEFAULT_VACTIVE_NTSC (240 + 4) #define DEFAULT_HDELAY_PAL (32 + 4) #define DEFAULT_HACTIVE_PAL (864-DEFAULT_HDELAY_PAL) #define DEFAULT_VDELAY_PAL (6) #define DEFAULT_VACTIVE_PAL (312-DEFAULT_VDELAY_PAL) static const u8 tbl_tw2864_ntsc_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x30 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xa0, 0x88, 0x5a, 0x01, 0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */ 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44, 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00, 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xc1, 0x0f, 0x11, 0x11, 0x81, 0x00, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00, }; static const u8 tbl_tw2864_pal_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xa0, 0x90, 0x5a, 0x01, 0x0a, 0x0a, 0x0a, 0x0a, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */ 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44, 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00, 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xc1, 0x0f, 0x11, 0x11, 0x81, 0x00, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xb5, 0x09, 0x00, 0xa0, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00, }; static const u8 tbl_tw2865_ntsc_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, /* 0x30 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, /* 0x40 */ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43, 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */ 0xE9, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80, 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13, 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, /* 0xa0 */ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44, 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */ 0xFF, 0xE7, 0xE9, 0xE9, 0xEB, 0xFF, 0xD6, 0xD8, 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80, 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0, }; static const u8 tbl_tw2865_pal_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, /* 0x40 */ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x11, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43, 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */ 0xEA, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80, 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13, 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, /* 0xa0 */ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44, 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */ 0xFF, 0xE7, 0xE9, 0xE9, 0xE9, 0xFF, 0xD7, 0xD8, 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80, 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0, }; #define is_tw286x(__solo, __id) (!(__solo->tw2815 & (1 << __id))) static u8 tw_readbyte(struct solo_dev *solo_dev, int chip_id, u8 tw6x_off, u8 tw_off) { if (is_tw286x(solo_dev, chip_id)) return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw6x_off); else return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw_off); } static void tw_writebyte(struct solo_dev *solo_dev, int chip_id, u8 tw6x_off, u8 tw_off, u8 val) { if (is_tw286x(solo_dev, chip_id)) solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw6x_off, val); else solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw_off, val); } static void tw_write_and_verify(struct solo_dev *solo_dev, u8 addr, u8 off, u8 val) { int i; for (i = 0; i < 5; i++) { u8 rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, addr, off); if (rval == val) return; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, addr, off, val); msleep_interruptible(1); } /* printk("solo6x10/tw28: Error writing register: %02x->%02x [%02x]\n", */ /* addr, off, val); */ } static int tw2865_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_tw2865_common[256]; int i; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) memcpy(tbl_tw2865_common, tbl_tw2865_pal_template, sizeof(tbl_tw2865_common)); else memcpy(tbl_tw2865_common, tbl_tw2865_ntsc_template, sizeof(tbl_tw2865_common)); /* ALINK Mode */ if (solo_dev->nr_chans == 4) { tbl_tw2865_common[0xd2] = 0x01; tbl_tw2865_common[0xcf] = 0x00; } else if (solo_dev->nr_chans == 8) { tbl_tw2865_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2865_common[0xcf] = 0x80; } else if (solo_dev->nr_chans == 16) { tbl_tw2865_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2865_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2865_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2865_common[0xcf] = 0x80; } for (i = 0; i < 0xff; i++) { /* Skip read only registers */ switch (i) { case 0xb8 ... 0xc1: case 0xc4 ... 0xc7: case 0xfd: continue; } switch (i & ~0x30) { case 0x00: case 0x0c ... 
0x0d: continue; } tw_write_and_verify(solo_dev, dev_addr, i, tbl_tw2865_common[i]); } return 0; } static int tw2864_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_tw2864_common[256]; int i; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) memcpy(tbl_tw2864_common, tbl_tw2864_pal_template, sizeof(tbl_tw2864_common)); else memcpy(tbl_tw2864_common, tbl_tw2864_ntsc_template, sizeof(tbl_tw2864_common)); if (solo_dev->tw2865 == 0) { /* IRQ Mode */ if (solo_dev->nr_chans == 4) { tbl_tw2864_common[0xd2] = 0x01; tbl_tw2864_common[0xcf] = 0x00; } else if (solo_dev->nr_chans == 8) { tbl_tw2864_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x40; } else if (solo_dev->nr_chans == 16) { tbl_tw2864_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2864_common[0xcf] = 0x40; } } else { /* ALINK Mode. Assumes that the first tw28xx is a * 2865 and these are in cascade. */ for (i = 0; i <= 4; i++) tbl_tw2864_common[0x08 | i << 4] = 0x12; if (solo_dev->nr_chans == 8) { tbl_tw2864_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x80; } else if (solo_dev->nr_chans == 16) { tbl_tw2864_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2864_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2864_common[0xcf] = 0x80; } } for (i = 0; i < 0xff; i++) { /* Skip read only registers */ switch (i) { case 0xb8 ... 
0xc1: case 0xfd: continue; } switch (i & ~0x30) { case 0x00: case 0x0c: case 0x0d: continue; } tw_write_and_verify(solo_dev, dev_addr, i, tbl_tw2864_common[i]); } return 0; } static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_ntsc_tw2815_common[] = { 0x00, 0xc8, 0x20, 0xd0, 0x06, 0xf0, 0x08, 0x80, 0x80, 0x80, 0x80, 0x02, 0x06, 0x00, 0x11, }; u8 tbl_pal_tw2815_common[] = { 0x00, 0x88, 0x20, 0xd0, 0x05, 0x20, 0x28, 0x80, 0x80, 0x80, 0x80, 0x82, 0x06, 0x00, 0x11, }; u8 tbl_tw2815_sfr[] = { 0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, /* 0x00 */ 0x64, 0x80, 0x80, 0x82, 0x82, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, /* 0x10 */ 0x00, 0x00, 0x00, 0xff, 0x8f, 0x00, 0x00, 0x00, 0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, /* 0x20 */ 0x31, 0x75, 0xb9, 0xfd, 0x00, 0x00, 0x88, 0x88, 0x88, 0x11, 0x00, 0x88, 0x88, 0x00, /* 0x30 */ }; u8 *tbl_tw2815_common; int i; int ch; tbl_ntsc_tw2815_common[0x06] = 0; /* Horizontal Delay Control */ tbl_ntsc_tw2815_common[0x02] = DEFAULT_HDELAY_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_NTSC >> 8); /* Horizontal Active Control */ tbl_ntsc_tw2815_common[0x03] = DEFAULT_HACTIVE_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x03 & (DEFAULT_HACTIVE_NTSC >> 8)) << 2); /* Vertical Delay Control */ tbl_ntsc_tw2815_common[0x04] = DEFAULT_VDELAY_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VDELAY_NTSC >> 8)) << 4); /* Vertical Active Control */ tbl_ntsc_tw2815_common[0x05] = DEFAULT_VACTIVE_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VACTIVE_NTSC >> 8)) << 5); tbl_pal_tw2815_common[0x06] = 0; /* Horizontal Delay Control */ tbl_pal_tw2815_common[0x02] = DEFAULT_HDELAY_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_PAL >> 8); /* Horizontal Active Control */ tbl_pal_tw2815_common[0x03] = DEFAULT_HACTIVE_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x03 & (DEFAULT_HACTIVE_PAL >> 8)) << 2); /* Vertical Delay Control */ tbl_pal_tw2815_common[0x04] = DEFAULT_VDELAY_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VDELAY_PAL >> 8)) << 4); /* Vertical Active Control */ tbl_pal_tw2815_common[0x05] = DEFAULT_VACTIVE_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VACTIVE_PAL >> 8)) << 5); tbl_tw2815_common = (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) ? 
tbl_ntsc_tw2815_common : tbl_pal_tw2815_common; /* Dual ITU-R BT.656 format */ tbl_tw2815_common[0x0d] |= 0x04; /* Audio configuration */ tbl_tw2815_sfr[0x62 - 0x40] &= ~(3 << 6); if (solo_dev->nr_chans == 4) { tbl_tw2815_sfr[0x63 - 0x40] |= 1; tbl_tw2815_sfr[0x62 - 0x40] |= 3 << 6; } else if (solo_dev->nr_chans == 8) { tbl_tw2815_sfr[0x63 - 0x40] |= 2; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6; } else if (solo_dev->nr_chans == 16) { tbl_tw2815_sfr[0x63 - 0x40] |= 3; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6; } /* Output mode of R_ADATM pin (0 mixing, 1 record) */ /* tbl_tw2815_sfr[0x63 - 0x40] |= 0 << 2; */ /* 8KHz, used to be 16KHz, but changed for remote client compat */ tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 2; tbl_tw2815_sfr[0x6c - 0x40] |= 0 << 2; /* Playback of right channel */ tbl_tw2815_sfr[0x6c - 0x40] |= 1 << 5; /* Reserved value (XXX ??) */ tbl_tw2815_sfr[0x5c - 0x40] |= 1 << 5; /* Analog output gain and mix ratio playback on full */ tbl_tw2815_sfr[0x70 - 0x40] |= 0xff; /* Select playback audio and mute all except */ tbl_tw2815_sfr[0x71 - 0x40] |= 0x10; tbl_tw2815_sfr[0x6d - 0x40] |= 0x0f; /* End of audio configuration */ for (ch = 0; ch < 4; ch++) { tbl_tw2815_common[0x0d] &= ~3; switch (ch) { case 0: tbl_tw2815_common[0x0d] |= 0x21; break; case 1: tbl_tw2815_common[0x0d] |= 0x20; break; case 2: tbl_tw2815_common[0x0d] |= 0x23; break; case 3: tbl_tw2815_common[0x0d] |= 0x22; break; } for (i = 0; i < 0x0f; i++) { if (i == 0x00) continue; /* read-only */ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, (ch * 0x10) + i, tbl_tw2815_common[i]); } } for (i = 0x40; i < 0x76; i++) { /* Skip read-only and nop registers */ if (i == 0x40 || i == 0x59 || i == 0x5a || i == 0x5d || i == 0x5e || i == 0x5f) continue; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, i, tbl_tw2815_sfr[i - 0x40]); } return 0; } #define FIRST_ACTIVE_LINE 0x0008 #define LAST_ACTIVE_LINE 0x0102 static void saa712x_write_regs(struct solo_dev *dev, const u8 *vals, int start, int n) { for (; start < n; start++, vals++) { /* Skip read-only registers */ switch (start) { /* case 0x00 ... 0x25: */ case 0x2e ... 
0x37: case 0x60: case 0x7d: continue; } solo_i2c_writebyte(dev, SOLO_I2C_SAA, 0x46, start, *vals); } } #define SAA712x_reg7c (0x80 | ((LAST_ACTIVE_LINE & 0x100) >> 2) \ | ((FIRST_ACTIVE_LINE & 0x100) >> 4)) static void saa712x_setup(struct solo_dev *dev) { const int reg_start = 0x26; const u8 saa7128_regs_ntsc[] = { /* :0x26 */ 0x0d, 0x00, /* :0x28 */ 0x59, 0x1d, 0x75, 0x3f, 0x06, 0x3f, /* :0x2e XXX: read-only */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* :0x38 */ 0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, /* :0x40 */ 0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18, 0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f, /* :0x50 */ 0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06, 0x02, 0x80, 0x71, 0x77, 0xa7, 0x67, 0x66, 0x2e, /* :0x60 */ 0x7b, 0x11, 0x4f, 0x1f, 0x7c, 0xf0, 0x21, 0x77, 0x41, 0x88, 0x41, 0x52, 0xed, 0x10, 0x10, 0x00, /* :0x70 */ 0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00, 0x00, 0x00, FIRST_ACTIVE_LINE, LAST_ACTIVE_LINE & 0xff, SAA712x_reg7c, 0x00, 0xff, 0xff, }, saa7128_regs_pal[] = { /* :0x26 */ 0x0d, 0x00, /* :0x28 */ 0xe1, 0x1d, 0x75, 0x3f, 0x06, 0x3f, /* :0x2e XXX: read-only */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* :0x38 */ 0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, /* :0x40 */ 0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18, 0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f, /* :0x50 */ 0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06, 0x02, 0x80, 0x0f, 0x77, 0xa7, 0x67, 0x66, 0x2e, /* :0x60 */ 0x7b, 0x02, 0x35, 0xcb, 0x8a, 0x09, 0x2a, 0x77, 0x41, 0x88, 0x41, 0x52, 0xf1, 0x10, 0x20, 0x00, /* :0x70 */ 0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x12, 0x30, SAA712x_reg7c | 0x40, 0x00, 0xff, 0xff, }; if (dev->video_type == SOLO_VO_FMT_TYPE_PAL) saa712x_write_regs(dev, saa7128_regs_pal, reg_start, sizeof(saa7128_regs_pal)); else saa712x_write_regs(dev, saa7128_regs_ntsc, reg_start, sizeof(saa7128_regs_ntsc)); } int solo_tw28_init(struct solo_dev *solo_dev) { int i; u8 value; solo_dev->tw28_cnt = 0; /* Detect techwell chip type(s) */ for (i = 0; i < solo_dev->nr_chans / 4; i++) { value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(i), 0xFF); switch (value >> 3) { case 0x18: solo_dev->tw2865 |= 1 << i; solo_dev->tw28_cnt++; break; case 0x0c: solo_dev->tw2864 |= 1 << i; solo_dev->tw28_cnt++; break; default: value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(i), 0x59); if ((value >> 3) == 0x04) { solo_dev->tw2815 |= 1 << i; solo_dev->tw28_cnt++; } } } if (solo_dev->tw28_cnt != (solo_dev->nr_chans >> 2)) { dev_err(&solo_dev->pdev->dev, "Could not initialize any techwell chips\n"); return -EINVAL; } saa712x_setup(solo_dev); for (i = 0; i < solo_dev->tw28_cnt; i++) { if ((solo_dev->tw2865 & (1 << i))) tw2865_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); else if ((solo_dev->tw2864 & (1 << i))) tw2864_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); else tw2815_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); } return 0; } /* * We accessed the video status signal in the Techwell chip through * iic/i2c because the video status reported by register REG_VI_STATUS1 * (address 0x012C) of the SOLO6010 chip doesn't give the correct video * status signal values. */ int tw28_get_video_status(struct solo_dev *solo_dev, u8 ch) { u8 val, chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AV_STAT_ADDR, TW_AV_STAT_ADDR) & 0x0f; return val & (1 << ch) ? 1 : 0; } #if 0 /* Status of audio from up to 4 techwell chips are combined into 1 variable. 
* See techwell datasheet for details. */ u16 tw28_get_audio_status(struct solo_dev *solo_dev) { u8 val; u16 status = 0; int i; for (i = 0; i < solo_dev->tw28_cnt; i++) { val = (tw_readbyte(solo_dev, i, TW286x_AV_STAT_ADDR, TW_AV_STAT_ADDR) & 0xf0) >> 4; status |= val << (i * 4); } return status; } #endif bool tw28_has_sharpness(struct solo_dev *solo_dev, u8 ch) { return is_tw286x(solo_dev, ch / 4); } int tw28_set_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 val) { char sval; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; if (val > 255 || val < 0) return -ERANGE; switch (ctrl) { case V4L2_CID_SHARPNESS: /* Only 286x has sharpness */ if (is_tw286x(solo_dev, chip_num)) { u8 v = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num)); v &= 0xf0; v |= val; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num), v); } else { return -EINVAL; } break; case V4L2_CID_HUE: if (is_tw286x(solo_dev, chip_num)) sval = val - 128; else sval = (char)val; tw_writebyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), TW_HUE_ADDR(ch), sval); break; case V4L2_CID_SATURATION: /* 286x chips have a U and V component for saturation */ if (is_tw286x(solo_dev, chip_num)) { solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SATURATIONU_ADDR(ch), val); } tw_writebyte(solo_dev, chip_num, TW286x_SATURATIONV_ADDR(ch), TW_SATURATION_ADDR(ch), val); break; case V4L2_CID_CONTRAST: tw_writebyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), TW_CONTRAST_ADDR(ch), val); break; case V4L2_CID_BRIGHTNESS: if (is_tw286x(solo_dev, chip_num)) sval = val - 128; else sval = (char)val; tw_writebyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), TW_BRIGHTNESS_ADDR(ch), sval); break; default: return -EINVAL; } return 0; } int tw28_get_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 *val) { u8 rval, chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; switch (ctrl) { case V4L2_CID_SHARPNESS: /* Only 286x has sharpness */ if (is_tw286x(solo_dev, chip_num)) { rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num)); *val = rval & 0x0f; } else *val = 0; break; case V4L2_CID_HUE: rval = tw_readbyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), TW_HUE_ADDR(ch)); if (is_tw286x(solo_dev, chip_num)) *val = (s32)((char)rval) + 128; else *val = rval; break; case V4L2_CID_SATURATION: *val = tw_readbyte(solo_dev, chip_num, TW286x_SATURATIONU_ADDR(ch), TW_SATURATION_ADDR(ch)); break; case V4L2_CID_CONTRAST: *val = tw_readbyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), TW_CONTRAST_ADDR(ch)); break; case V4L2_CID_BRIGHTNESS: rval = tw_readbyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), TW_BRIGHTNESS_ADDR(ch)); if (is_tw286x(solo_dev, chip_num)) *val = (s32)((char)rval) + 128; else *val = rval; break; default: return -EINVAL; } return 0; } #if 0 /* * For audio output volume, the output channel is only 1. In this case we * don't need to offset TW_CHIP_OFFSET_ADDR. The TW_CHIP_OFFSET_ADDR used * is the base address of the techwell chip. 
*/ void tw2815_Set_AudioOutVol(struct solo_dev *solo_dev, unsigned int u_val) { unsigned int val; unsigned int chip_num; chip_num = (solo_dev->nr_chans - 1) / 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR, TW_AUDIO_OUTPUT_VOL_ADDR); u_val = (val & 0x0f) | (u_val << 4); tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR, TW_AUDIO_OUTPUT_VOL_ADDR, u_val); } #endif u8 tw28_get_audio_gain(struct solo_dev *solo_dev, u8 ch) { u8 val; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch)); return (ch % 2) ? (val >> 4) : (val & 0x0f); } void tw28_set_audio_gain(struct solo_dev *solo_dev, u8 ch, u8 val) { u8 old_val; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; old_val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch)); val = (old_val & ((ch % 2) ? 0x0f : 0xf0)) | ((ch % 2) ? (val << 4) : val); tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch), val); }
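/*
 * Editor's note: an illustrative, standalone sketch (not part of the
 * solo6x10 source above) of the nibble packing behind
 * tw28_get_audio_gain()/tw28_set_audio_gain(): two 4-bit channel gains
 * share one register byte, even channels in the low nibble and odd
 * channels in the high nibble. A plain byte stands in for the I2C
 * register; regbyte, gain_get and gain_set are hypothetical names.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t regbyte;	/* stands in for the techwell gain register */

static uint8_t gain_get(int ch)
{
	return (ch % 2) ? (uint8_t)(regbyte >> 4) : (uint8_t)(regbyte & 0x0f);
}

static void gain_set(int ch, uint8_t val)
{
	uint8_t old = regbyte;

	/* Preserve the other channel's nibble, replace our own. */
	regbyte = (uint8_t)((old & ((ch % 2) ? 0x0f : 0xf0)) |
			    ((ch % 2) ? (val << 4) : (val & 0x0f)));
}

int main(void)
{
	gain_set(0, 0x5);
	gain_set(1, 0xa);
	assert(gain_get(0) == 0x5);
	assert(gain_get(1) == 0xa);
	return 0;
}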
gpl-2.0
titanxxh/xengt-ha-kernel
drivers/gpu/drm/exynos/exynos_dp_reg.c
1543
32506
/* * Samsung DP (Display port) register interface driver. * * Copyright (C) 2012 Samsung Electronics Co., Ltd. * Author: Jingoo Han <jg1.han@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/device.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/gpio.h> #include "exynos_dp_core.h" #include "exynos_dp_reg.h" #define COMMON_INT_MASK_1 0 #define COMMON_INT_MASK_2 0 #define COMMON_INT_MASK_3 0 #define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) #define INT_STA_MASK INT_HPD void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg |= HDCP_VIDEO_MUTE; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } else { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg &= ~HDCP_VIDEO_MUTE; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } } void exynos_dp_stop_video(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg &= ~VIDEO_EN; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; else reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); } void exynos_dp_init_analog_param(struct exynos_dp_device *dp) { u32 reg; reg = TX_TERMINAL_CTRL_50_OHM; writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1); reg = SEL_24M | TX_DVDD_BIT_1_0625V; writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2); reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3); reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | TX_CUR1_2X | TX_CUR_16_MA; writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1); reg = CH3_AMP_400_MV | CH2_AMP_400_MV | CH1_AMP_400_MV | CH0_AMP_400_MV; writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL); } void exynos_dp_init_interrupt(struct exynos_dp_device *dp) { /* Set interrupt pin assertion polarity as high */ writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL); /* Clear pending registers */ writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2); writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3); writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA); /* 0: mask, 1: unmask */ writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK); } void exynos_dp_reset(struct exynos_dp_device *dp) { u32 reg; exynos_dp_stop_video(dp); exynos_dp_enable_video_mute(dp, 0); reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | HDCP_FUNC_EN_N | SW_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); usleep_range(20, 30); exynos_dp_lane_swap(dp, 0); writel(0x0, dp->reg_base + 
EXYNOS_DP_SYS_CTL_1); writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2); writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3); writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4); writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL); writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL); writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L); writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H); writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL); writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST); writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD); writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN); writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH); writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH); writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } void exynos_dp_swreset(struct exynos_dp_device *dp) { writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET); } void exynos_dp_config_interrupt(struct exynos_dp_device *dp) { u32 reg; /* 0: mask, 1: unmask */ reg = COMMON_INT_MASK_1; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); reg = COMMON_INT_MASK_2; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); reg = COMMON_INT_MASK_3; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); reg = COMMON_INT_MASK_4; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); reg = INT_STA_MASK; writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK); } enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); if (reg & PLL_LOCK) return PLL_LOCKED; else return PLL_UNLOCKED; } void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); reg |= DP_PLL_PD; writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); } else { reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); reg &= ~DP_PLL_PD; writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); } } void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, enum analog_power_block block, bool enable) { u32 reg; switch (block) { case AUX_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= AUX_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~AUX_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH0_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH0_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH0_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH1_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH1_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH1_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH2_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH2_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH2_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH3_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH3_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH3_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case ANALOG_TOTAL: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= DP_PHY_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = 
readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~DP_PHY_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case POWER_ALL: if (enable) { reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD | CH1_PD | CH0_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD); } break; default: break; } } void exynos_dp_init_analog_func(struct exynos_dp_device *dp) { u32 reg; int timeout_loop = 0; exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); reg = PLL_LOCK_CHG; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); /* Power up PLL */ if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { exynos_dp_set_pll_power_down(dp, 0); while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { timeout_loop++; if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { dev_err(dp->dev, "failed to get pll lock status\n"); return; } usleep_range(10, 20); } } /* Enable Serdes FIFO function and Link symbol clock domain module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N | AUX_FUNC_EN_N); writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); } void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp) { u32 reg; if (gpio_is_valid(dp->hpd_gpio)) return; reg = HOTPLUG_CHG | HPD_LOST | PLUG; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); reg = INT_HPD; writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); } void exynos_dp_init_hpd(struct exynos_dp_device *dp) { u32 reg; if (gpio_is_valid(dp->hpd_gpio)) return; exynos_dp_clear_hotplug_interrupts(dp); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); reg &= ~(F_HPD | HPD_CTRL); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); } enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp) { u32 reg; if (gpio_is_valid(dp->hpd_gpio)) { reg = gpio_get_value(dp->hpd_gpio); if (reg) return DP_IRQ_TYPE_HP_CABLE_IN; else return DP_IRQ_TYPE_HP_CABLE_OUT; } else { /* Parse hotplug interrupt status register */ reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); if (reg & PLUG) return DP_IRQ_TYPE_HP_CABLE_IN; if (reg & HPD_LOST) return DP_IRQ_TYPE_HP_CABLE_OUT; if (reg & HOTPLUG_CHG) return DP_IRQ_TYPE_HP_CHANGE; return DP_IRQ_TYPE_UNKNOWN; } } void exynos_dp_reset_aux(struct exynos_dp_device *dp) { u32 reg; /* Disable AUX channel module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg |= AUX_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); } void exynos_dp_init_aux(struct exynos_dp_device *dp) { u32 reg; /* Clear interrupts related to AUX channel */ reg = RPLY_RECEIV | AUX_ERR; writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); exynos_dp_reset_aux(dp); /* Disable AUX transaction H/W retry */ reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0) | AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL); /* Receive AUX Channel DEFER commands equal to DEFER_COUNT*64 */ reg = DEFER_CTRL_EN | DEFER_COUNT(1); writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL); /* Enable AUX channel module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg &= ~AUX_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); } int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp) { u32 reg; if (gpio_is_valid(dp->hpd_gpio)) { if (gpio_get_value(dp->hpd_gpio)) return 0; } else { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); if (reg & HPD_STATUS) return 0; } return -EINVAL; } void 
exynos_dp_enable_sw_function(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg &= ~SW_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); } int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp) { int reg; int retval = 0; int timeout_loop = 0; /* Enable AUX CH operation */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); reg |= AUX_EN; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); /* Is AUX CH command reply received? */ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); while (!(reg & RPLY_RECEIV)) { timeout_loop++; if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { dev_err(dp->dev, "AUX CH command reply failed!\n"); return -ETIMEDOUT; } reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); usleep_range(10, 11); } /* Clear interrupt source for AUX CH command reply */ writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA); /* Clear interrupt source for AUX CH access error */ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); if (reg & AUX_ERR) { writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA); return -EREMOTEIO; } /* Check AUX CH error access status */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA); if ((reg & AUX_STATUS_MASK) != 0) { dev_err(dp->dev, "AUX CH error happens: %d\n\n", reg & AUX_STATUS_MASK); return -EREMOTEIO; } return retval; } int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned char data) { u32 reg; int i; int retval; for (i = 0; i < 3; i++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* Write data buffer */ reg = (unsigned int)data; writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0); /* * Set DisplayPort transaction and write 1 byte * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. */ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } return retval; } int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned char *data) { u32 reg; int i; int retval; for (i = 0; i < 3; i++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* * Set DisplayPort transaction and read 1 byte * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } /* Read data buffer */ reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); *data = (unsigned char)(reg & 0xff); return retval; } int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned int count, unsigned char data[]) { u32 reg; unsigned int start_offset; unsigned int cur_data_count; unsigned int cur_data_idx; int i; int retval = 0; /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); start_offset = 0; while (start_offset < count) { /* Buffer size of AUX CH is 16 * 4bytes */ if ((count - start_offset) > 16) cur_data_count = 16; else cur_data_count = count - start_offset; for (i = 0; i < 3; i++) { /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); for (cur_data_idx = 0; cur_data_idx < cur_data_count; cur_data_idx++) { reg = data[start_offset + cur_data_idx]; writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0 + 4 * cur_data_idx); } /* * Set DisplayPort transaction and write * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. */ reg = AUX_LENGTH(cur_data_count) | AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } start_offset += cur_data_count; } return retval; } int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned int count, unsigned char data[]) { u32 reg; unsigned int start_offset; unsigned int cur_data_count; unsigned int cur_data_idx; int i; int retval = 0; /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); start_offset = 0; while (start_offset < count) { /* Buffer size of AUX CH is 16 * 4bytes */ if ((count - start_offset) > 16) cur_data_count = 16; else cur_data_count = count - start_offset; /* AUX CH Request Transaction process */ for (i = 0; i < 3; i++) { /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* * Set DisplayPort transaction and read * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_LENGTH(cur_data_count) | AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } for (cur_data_idx = 0; cur_data_idx < cur_data_count; cur_data_idx++) { reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 + 4 * cur_data_idx); data[start_offset + cur_data_idx] = (unsigned char)reg; } start_offset += cur_data_count; } return retval; } int exynos_dp_select_i2c_device(struct exynos_dp_device *dp, unsigned int device_addr, unsigned int reg_addr) { u32 reg; int retval; /* Set EDID device address */ reg = device_addr; writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* Set offset from base address of EDID device */ writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0); /* * Set I2C transaction and write address * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. */ reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT | AUX_TX_COMM_WRITE; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval != 0) dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); return retval; } int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp, unsigned int device_addr, unsigned int reg_addr, unsigned int *data) { u32 reg; int i; int retval; for (i = 0; i < 3; i++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Select EDID device */ retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr); if (retval != 0) continue; /* * Set I2C transaction and read data * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. */ reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } /* Read data */ if (retval == 0) *data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); return retval; } int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp, unsigned int device_addr, unsigned int reg_addr, unsigned int count, unsigned char edid[]) { u32 reg; unsigned int i, j; unsigned int cur_data_idx; unsigned int defer = 0; int retval = 0; for (i = 0; i < count; i += 16) { for (j = 0; j < 3; j++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Set normal AUX CH command */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); reg &= ~ADDR_ONLY; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); /* * If Rx sends defer, Tx sends only a read * request without sending the address */ if (!defer) retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr + i); else defer = 0; if (retval == 0) { /* * Set I2C transaction and write data * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_LENGTH(16) | AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } /* Check if Rx sends defer */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM); if (reg == AUX_RX_COMM_AUX_DEFER || reg == AUX_RX_COMM_I2C_DEFER) { dev_err(dp->dev, "Defer: %d\n\n", reg); defer = 1; } } for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) { reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 + 4 * cur_data_idx); edid[i + cur_data_idx] = (unsigned char)reg; } } return retval; } void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype) { u32 reg; reg = bwtype; if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS)) writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET); } void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET); *bwtype = reg; } void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count) { u32 reg; reg = count; writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); } void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); *count = reg; } void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg |= ENHANCED; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); } else { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg &= ~ENHANCED; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); } } void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, enum pattern_set pattern) { u32 reg; switch (pattern) { case PRBS7: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case D10_2: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case TRAINING_PTN1: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case TRAINING_PTN2: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case DP_NONE: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_DISABLE | SW_TRAINING_PATTERN_SET_NORMAL; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; default: break; } } void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); } void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); } void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); } void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + 
EXYNOS_DP_LN3_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); } void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); } void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); } void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); } void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); } u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); return reg; } u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); return reg; } u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); return reg; } u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); return reg; } void exynos_dp_reset_macro(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST); reg |= MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); /* 10 us is the minimum reset time. */ usleep_range(10, 20); reg &= ~MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); } void exynos_dp_init_video(struct exynos_dp_device *dp) { u32 reg; reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); reg = 0x0; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); reg = CHA_CRI(4) | CHA_CTRL; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); reg = 0x0; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); reg = VID_HRES_TH(2) | VID_VRES_TH(0); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8); } void exynos_dp_set_video_color_format(struct exynos_dp_device *dp) { u32 reg; /* Configure the input color depth, color space, dynamic range */ reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) | (dp->video_info->color_depth << IN_BPC_SHIFT) | (dp->video_info->color_space << IN_COLOR_F_SHIFT); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2); /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); reg &= ~IN_YC_COEFFI_MASK; if (dp->video_info->ycbcr_coeff) reg |= IN_YC_COEFFI_ITU709; else reg |= IN_YC_COEFFI_ITU601; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); } int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); if (!(reg & DET_STA)) { dev_dbg(dp->dev, "Input stream clock not detected.\n"); return -EINVAL; } reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); if (reg & CHA_STA) { dev_dbg(dp->dev, "Input stream clk is changing\n"); return -EINVAL; } return 0; } void 
exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp, enum clock_recovery_m_value_type type, u32 m_value, u32 n_value) { u32 reg; if (type == REGISTER_M) { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg |= FIX_M_VID; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg = m_value & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0); reg = (m_value >> 8) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1); reg = (m_value >> 16) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2); reg = n_value & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0); reg = (n_value >> 8) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1); reg = (n_value >> 16) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2); } else { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg &= ~FIX_M_VID; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0); writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1); writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2); } } void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type) { u32 reg; if (type == VIDEO_TIMING_FROM_CAPTURE) { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~FORMAT_SEL; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); } else { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg |= FORMAT_SEL; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); } } void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } else { reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MODE_SLAVE_MODE; writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } } void exynos_dp_start_video(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg |= VIDEO_EN; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); if (!(reg & STRM_VALID)) { dev_dbg(dp->dev, "Input video stream is not detected.\n"); return -EINVAL; } return 0; } void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N); reg |= MASTER_VID_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~INTERACE_SCAN_CFG; reg |= (dp->video_info->interlaced << 2); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~VSYNC_POLARITY_CFG; reg |= (dp->video_info->v_sync_polarity << 1); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~HSYNC_POLARITY_CFG; reg |= (dp->video_info->h_sync_polarity << 0); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } void exynos_dp_enable_scrambling(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); reg &= ~SCRAMBLING_DISABLE; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); } void exynos_dp_disable_scrambling(struct exynos_dp_device *dp) { u32 
reg; reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); reg |= SCRAMBLING_DISABLE; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); }
gpl-2.0
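/*
 * Illustrative sketch, not part of the file above: how the register
 * helpers defined there might be combined at the start of link training.
 * The dp pointer, the LINK_RATE_2_70GBPS value, the raw lane count of 4,
 * and this function itself are assumptions drawn from the driver context,
 * not code from the original source.
 */
static void example_start_link_training(struct exynos_dp_device *dp)
{
	u32 bw, lanes;

	/* Program the requested link rate and lane count ... */
	exynos_dp_set_link_bandwidth(dp, LINK_RATE_2_70GBPS);
	exynos_dp_set_lane_count(dp, 4);

	/* ... read them back, since the bandwidth write is validated ... */
	exynos_dp_get_link_bandwidth(dp, &bw);
	exynos_dp_get_lane_count(dp, &lanes);
	pr_debug("link: bw=0x%x lanes=%u\n", bw, lanes);

	/* ... then start with clock-recovery pattern 1 (scrambling off). */
	exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
}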
artemh/asuswrt-merlin
release/src-rt-6.x.4708/linux/linux-2.6.36/arch/mips/math-emu/sp_scalb.c
1799
1490
/* IEEE754 floating point arithmetic * single precision */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * http://www.algor.co.uk * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754sp.h" ieee754sp ieee754sp_scalb(ieee754sp x, int n) { COMPXSP; CLEARCX; EXPLODEXSP; switch (xc) { case IEEE754_CLASS_SNAN: return ieee754sp_nanxcpt(x, "scalb", x, n); case IEEE754_CLASS_QNAN: case IEEE754_CLASS_INF: case IEEE754_CLASS_ZERO: return x; case IEEE754_CLASS_DNORM: SPDNORMX; break; case IEEE754_CLASS_NORM: break; } SPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n); } ieee754sp ieee754sp_ldexp(ieee754sp x, int n) { return ieee754sp_scalb(x, n); }
gpl-2.0
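/*
 * Usage sketch, not part of the file above: ieee754sp_scalb(x, n)
 * computes x * 2^n by adjusting the exponent directly (SPNORMRET2
 * renormalizes if needed), and ieee754sp_ldexp() is an alias for it.
 * This hypothetical helper shows the common case.
 */
static ieee754sp example_quarter(ieee754sp x)
{
	/* exact x / 4 for normal inputs: only the exponent changes */
	return ieee754sp_scalb(x, -2);
}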
agrabren/android_kernel_htc_shooter
drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
2311
10308
/****************************************************************************** * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" #include "iwl-agn.h" int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant) { struct iwl_tx_ant_config_cmd tx_ant_cmd = { .valid = cpu_to_le32(valid_tx_ant), }; if (IWL_UCODE_API(priv->ucode_ver) > 1) { IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, sizeof(struct iwl_tx_ant_config_cmd), &tx_ant_cmd); } else { IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n"); return -EOPNOTSUPP; } } static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) { u16 size = (u16)sizeof(struct iwl_addsta_cmd); struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; memcpy(addsta, cmd, size); /* reserved in 5000 */ addsta->rate_n_flags = cpu_to_le16(0); return size; } static void iwlagn_gain_computation(struct iwl_priv *priv, u32 average_noise[NUM_RX_CHAINS], u16 min_average_noise_antenna_i, u32 min_average_noise, u8 default_chain) { int i; s32 delta_g; struct iwl_chain_noise_data *data = &priv->chain_noise_data; /* * Find Gain Code for the chains based on "default chain" */ for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) { if ((data->disconn_array[i])) { data->delta_gain_code[i] = 0; continue; } delta_g = (priv->cfg->base_params->chain_noise_scale * ((s32)average_noise[default_chain] - (s32)average_noise[i])) / 1500; /* bound gain to a 2-bit magnitude max; the 3rd bit is the sign */ data->delta_gain_code[i] = min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); if (delta_g < 0) /* * set negative sign ... * note to Intel developers: This is uCode API format, * not the format of any internal device registers. * Do not change this format for e.g. 6050 or similar * devices. Change format only if more resolution * (i.e. more than 2 bits of magnitude) is needed. 
*/ data->delta_gain_code[i] |= (1 << 2); } IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n", data->delta_gain_code[1], data->delta_gain_code[2]); if (!data->radio_write) { struct iwl_calib_chain_noise_gain_cmd cmd; memset(&cmd, 0, sizeof(cmd)); cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_gain_cmd; cmd.hdr.first_group = 0; cmd.hdr.groups_num = 1; cmd.hdr.data_valid = 1; cmd.delta_gain_1 = data->delta_gain_code[1]; cmd.delta_gain_2 = data->delta_gain_code[2]; iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, sizeof(cmd), &cmd, NULL); data->radio_write = 1; data->state = IWL_CHAIN_NOISE_CALIBRATED; } } static void iwlagn_chain_noise_reset(struct iwl_priv *priv) { struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_any_associated(priv)) { struct iwl_calib_chain_noise_reset_cmd cmd; /* clear data for chain noise calibration algorithm */ data->chain_noise_a = 0; data->chain_noise_b = 0; data->chain_noise_c = 0; data->chain_signal_a = 0; data->chain_signal_b = 0; data->chain_signal_c = 0; data->beacon_count = 0; memset(&cmd, 0, sizeof(cmd)); cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_reset_cmd; cmd.hdr.first_group = 0; cmd.hdr.groups_num = 1; cmd.hdr.data_valid = 1; ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, sizeof(cmd), &cmd); if (ret) IWL_ERR(priv, "Could not send REPLY_PHY_CALIBRATION_CMD\n"); data->state = IWL_CHAIN_NOISE_ACCUMULATE; IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); } } static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, struct ieee80211_tx_info *info, __le16 fc, __le32 *tx_flags) { if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT || info->flags & IEEE80211_TX_CTL_AMPDU) *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; } /* Calc max signal level (dBm) among 3 possible receivers */ static int iwlagn_calc_rssi(struct iwl_priv *priv, struct iwl_rx_phy_res *rx_resp) { /* data from PHY/DSP regarding signal strength, etc., * contents are always there, not configurable by host */ struct iwlagn_non_cfg_phy *ncphy = (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf; u32 val, rssi_a, rssi_b, rssi_c, max_rssi; u8 agc; val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]); agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS; /* Find max rssi among 3 possible receivers. * These values are measured by the digital signal processor (DSP). * They should stay fairly constant even as the signal strength varies, * if the radio's automatic gain control (AGC) is working right. * AGC value (see below) will provide the "interesting" info. */ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]); rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >> IWLAGN_OFDM_RSSI_A_BIT_POS; rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >> IWLAGN_OFDM_RSSI_B_BIT_POS; val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]); rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >> IWLAGN_OFDM_RSSI_C_BIT_POS; max_rssi = max_t(u32, rssi_a, rssi_b); max_rssi = max_t(u32, max_rssi, rssi_c); IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", rssi_a, rssi_b, rssi_c, max_rssi, agc); /* dBm = max_rssi dB - agc dB - constant. * Higher AGC (higher radio gain) means lower signal. 
*/ return max_rssi - agc - IWLAGN_RSSI_OFFSET; } static int iwlagn_set_pan_params(struct iwl_priv *priv) { struct iwl_wipan_params_cmd cmd; struct iwl_rxon_context *ctx_bss, *ctx_pan; int slot0 = 300, slot1 = 0; int ret; if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) return 0; BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); lockdep_assert_held(&priv->mutex); ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; /* * If the PAN context is inactive, then we don't need * to update the PAN parameters, the last thing we'll * have done before it goes inactive is making the PAN * parameters be WLAN-only. */ if (!ctx_pan->is_active) return 0; memset(&cmd, 0, sizeof(cmd)); /* only 2 slots are currently allowed */ cmd.num_slots = 2; cmd.slots[0].type = 0; /* BSS */ cmd.slots[1].type = 1; /* PAN */ if (priv->_agn.hw_roc_channel) { /* both contexts must be used for this to happen */ slot1 = priv->_agn.hw_roc_duration; slot0 = IWL_MIN_SLOT_TIME; } else if (ctx_bss->vif && ctx_pan->vif) { int bcnint = ctx_pan->vif->bss_conf.beacon_int; int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; /* should be set, but seems unused?? */ cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); if (ctx_pan->vif->type == NL80211_IFTYPE_AP && bcnint && bcnint != ctx_bss->vif->bss_conf.beacon_int) { IWL_ERR(priv, "beacon intervals don't match (%d, %d)\n", ctx_bss->vif->bss_conf.beacon_int, ctx_pan->vif->bss_conf.beacon_int); } else bcnint = max_t(int, bcnint, ctx_bss->vif->bss_conf.beacon_int); if (!bcnint) bcnint = DEFAULT_BEACON_INTERVAL; slot0 = bcnint / 2; slot1 = bcnint - slot0; if (test_bit(STATUS_SCAN_HW, &priv->status) || (!ctx_bss->vif->bss_conf.idle && !ctx_bss->vif->bss_conf.assoc)) { slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; slot1 = IWL_MIN_SLOT_TIME; } else if (!ctx_pan->vif->bss_conf.idle && !ctx_pan->vif->bss_conf.assoc) { slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME; slot0 = IWL_MIN_SLOT_TIME; } } else if (ctx_pan->vif) { slot0 = 0; slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * ctx_pan->vif->bss_conf.beacon_int; slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); if (test_bit(STATUS_SCAN_HW, &priv->status)) { slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; slot1 = IWL_MIN_SLOT_TIME; } } cmd.slots[0].width = cpu_to_le16(slot0); cmd.slots[1].width = cpu_to_le16(slot1); ret = iwl_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, sizeof(cmd), &cmd); if (ret) IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); return ret; } struct iwl_hcmd_ops iwlagn_hcmd = { .commit_rxon = iwlagn_commit_rxon, .set_rxon_chain = iwlagn_set_rxon_chain, .set_tx_ant = iwlagn_send_tx_ant_config, .send_bt_config = iwl_send_bt_config, .set_pan_params = iwlagn_set_pan_params, }; struct iwl_hcmd_ops iwlagn_bt_hcmd = { .commit_rxon = iwlagn_commit_rxon, .set_rxon_chain = iwlagn_set_rxon_chain, .set_tx_ant = iwlagn_send_tx_ant_config, .send_bt_config = iwlagn_send_advance_bt_config, .set_pan_params = iwlagn_set_pan_params, }; struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = { .build_addsta_hcmd = iwlagn_build_addsta_hcmd, .gain_computation = iwlagn_gain_computation, .chain_noise_reset = iwlagn_chain_noise_reset, .tx_cmd_protection = iwlagn_tx_cmd_protection, .calc_rssi = iwlagn_calc_rssi, .request_scan = iwlagn_request_scan, .post_scan = iwlagn_post_scan, };
gpl-2.0
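/*
 * Worked example, not part of the file above: the delta-gain encoding
 * used in iwlagn_gain_computation() there. The magnitude is clamped to
 * 2 bits and bit 2 carries the sign, matching the uCode API format the
 * in-code comment describes. This helper is a standalone illustration.
 */
static u8 example_encode_delta_gain(s32 delta_g)
{
	u8 code = min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

	if (delta_g < 0)
		code |= (1 << 2);	/* negative sign in bit 2 */
	return code;			/* e.g. -2 -> 0b110, +3 -> 0b011 */
}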
CyanogenMod/htc-kernel-msm8660
arch/arm/mach-pnx4008/irq.c
3079
4025
/* * arch/arm/mach-pnx4008/irq.c * * PNX4008 IRQ controller driver * * Author: Dmitry Chigirev <source@mvista.com> * * Based on reference code received from Philips: * Copyright (C) 2003 Philips Semiconductors * * 2005 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/system.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/map.h> #include <mach/irq.h> static u8 pnx4008_irq_type[NR_IRQS] = PNX4008_IRQ_TYPES; static void pnx4008_mask_irq(struct irq_data *d) { __raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq)); /* mask interrupt */ } static void pnx4008_unmask_irq(struct irq_data *d) { __raw_writel(__raw_readl(INTC_ER(d->irq)) | INTC_BIT(d->irq), INTC_ER(d->irq)); /* unmask interrupt */ } static void pnx4008_mask_ack_irq(struct irq_data *d) { __raw_writel(__raw_readl(INTC_ER(d->irq)) & ~INTC_BIT(d->irq), INTC_ER(d->irq)); /* mask interrupt */ __raw_writel(INTC_BIT(d->irq), INTC_SR(d->irq)); /* clear interrupt status */ } static int pnx4008_set_irq_type(struct irq_data *d, unsigned int type) { switch (type) { case IRQ_TYPE_EDGE_RISING: __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /*rising edge */ irq_set_handler(d->irq, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: __raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */ __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*falling edge */ irq_set_handler(d->irq, handle_edge_irq); break; case IRQ_TYPE_LEVEL_LOW: __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ __raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*low level */ irq_set_handler(d->irq, handle_level_irq); break; case IRQ_TYPE_LEVEL_HIGH: __raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */ __raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /* high level */ irq_set_handler(d->irq, handle_level_irq); break; /* IRQ_TYPE_EDGE_BOTH is not supported */ default: printk(KERN_ERR "PNX4008 IRQ: Unsupported irq type %d\n", type); return -1; } return 0; } static struct irq_chip pnx4008_irq_chip = { .irq_ack = pnx4008_mask_ack_irq, .irq_mask = pnx4008_mask_irq, .irq_unmask = pnx4008_unmask_irq, .irq_set_type = pnx4008_set_irq_type, }; void __init pnx4008_init_irq(void) { unsigned int i; /* configure IRQ's */ for (i = 0; i < NR_IRQS; i++) { set_irq_flags(i, IRQF_VALID); irq_set_chip(i, &pnx4008_irq_chip); pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]); } /* configure and enable IRQ 0,1,30,31 (cascade interrupts) */ pnx4008_set_irq_type(irq_get_irq_data(SUB1_IRQ_N), pnx4008_irq_type[SUB1_IRQ_N]); pnx4008_set_irq_type(irq_get_irq_data(SUB2_IRQ_N), pnx4008_irq_type[SUB2_IRQ_N]); pnx4008_set_irq_type(irq_get_irq_data(SUB1_FIQ_N), 
pnx4008_irq_type[SUB1_FIQ_N]); pnx4008_set_irq_type(irq_get_irq_data(SUB2_FIQ_N), pnx4008_irq_type[SUB2_FIQ_N]); /* mask all others */ __raw_writel((1 << SUB2_FIQ_N) | (1 << SUB1_FIQ_N) | (1 << SUB2_IRQ_N) | (1 << SUB1_IRQ_N), INTC_ER(MAIN_BASE_INT)); __raw_writel(0, INTC_ER(SIC1_BASE_INT)); __raw_writel(0, INTC_ER(SIC2_BASE_INT)); }
gpl-2.0
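/*
 * Illustrative sketch, not part of the file above: a peripheral driver
 * on this platform would normally just request a trigger type and let
 * the irq_chip defined there program INTC_ATR/INTC_APR through
 * pnx4008_set_irq_type(). The IRQ number, handler, and this setup
 * function are hypothetical.
 */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup(unsigned int example_irq)
{
	/* routed to pnx4008_set_irq_type() via the irq_chip above */
	irq_set_irq_type(example_irq, IRQ_TYPE_EDGE_RISING);
	return request_irq(example_irq, example_handler, 0, "example", NULL);
}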
agat63/N861_ZTE_kernel
security/apparmor/audit.c
3335
4642
/* * AppArmor security module * * This file contains AppArmor auditing functions * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/audit.h> #include <linux/socket.h> #include "include/apparmor.h" #include "include/audit.h" #include "include/policy.h" const char *op_table[] = { "null", "sysctl", "capable", "unlink", "mkdir", "rmdir", "mknod", "truncate", "link", "symlink", "rename_src", "rename_dest", "chmod", "chown", "getattr", "open", "file_perm", "file_lock", "file_mmap", "file_mprotect", "create", "post_create", "bind", "connect", "listen", "accept", "sendmsg", "recvmsg", "getsockname", "getpeername", "getsockopt", "setsockopt", "socket_shutdown", "ptrace", "exec", "change_hat", "change_profile", "change_onexec", "setprocattr", "setrlimit", "profile_replace", "profile_load", "profile_remove" }; const char *audit_mode_names[] = { "normal", "quiet_denied", "quiet", "noquiet", "all" }; static char *aa_audit_type[] = { "AUDIT", "ALLOWED", "DENIED", "HINT", "STATUS", "ERROR", "KILLED" }; /* * Currently AppArmor auditing is fed straight into the audit framework. * * TODO: * netlink interface for complain mode * user auditing - send user auditing to netlink interface * system control of whether user audit messages go to system log */ /** * audit_pre - core AppArmor audit function. * @ab: audit buffer to fill (NOT NULL) * @ca: audit structure containing data to audit (NOT NULL) * * Record common AppArmor audit data from @ca */ static void audit_pre(struct audit_buffer *ab, void *ca) { struct common_audit_data *sa = ca; struct task_struct *tsk = sa->tsk ?
sa->tsk : current; if (aa_g_audit_header) { audit_log_format(ab, "apparmor="); audit_log_string(ab, aa_audit_type[sa->aad.type]); } if (sa->aad.op) { audit_log_format(ab, " operation="); audit_log_string(ab, op_table[sa->aad.op]); } if (sa->aad.info) { audit_log_format(ab, " info="); audit_log_string(ab, sa->aad.info); if (sa->aad.error) audit_log_format(ab, " error=%d", sa->aad.error); } if (sa->aad.profile) { struct aa_profile *profile = sa->aad.profile; pid_t pid; rcu_read_lock(); pid = tsk->real_parent->pid; rcu_read_unlock(); audit_log_format(ab, " parent=%d", pid); if (profile->ns != root_ns) { audit_log_format(ab, " namespace="); audit_log_untrustedstring(ab, profile->ns->base.hname); } audit_log_format(ab, " profile="); audit_log_untrustedstring(ab, profile->base.hname); } if (sa->aad.name) { audit_log_format(ab, " name="); audit_log_untrustedstring(ab, sa->aad.name); } } /** * aa_audit_msg - Log a message to the audit subsystem * @sa: audit event structure (NOT NULL) * @cb: optional callback fn for type specific fields (MAYBE NULL) */ void aa_audit_msg(int type, struct common_audit_data *sa, void (*cb) (struct audit_buffer *, void *)) { sa->aad.type = type; sa->lsm_pre_audit = audit_pre; sa->lsm_post_audit = cb; common_lsm_audit(sa); } /** * aa_audit - Log a profile based audit event to the audit subsystem * @type: audit type for the message * @profile: profile to check against (NOT NULL) * @gfp: allocation flags to use * @sa: audit event (NOT NULL) * @cb: optional callback fn for type specific fields (MAYBE NULL) * * Handle default message switching based off of audit mode flags * * Returns: error on failure */ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, struct common_audit_data *sa, void (*cb) (struct audit_buffer *, void *)) { BUG_ON(!profile); if (type == AUDIT_APPARMOR_AUTO) { if (likely(!sa->aad.error)) { if (AUDIT_MODE(profile) != AUDIT_ALL) return 0; type = AUDIT_APPARMOR_AUDIT; } else if (COMPLAIN_MODE(profile)) type = AUDIT_APPARMOR_ALLOWED; else type = AUDIT_APPARMOR_DENIED; } if (AUDIT_MODE(profile) == AUDIT_QUIET || (type == AUDIT_APPARMOR_DENIED && AUDIT_MODE(profile) == AUDIT_QUIET)) return sa->aad.error; if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED) type = AUDIT_APPARMOR_KILL; if (!unconfined(profile)) sa->aad.profile = profile; aa_audit_msg(type, sa, cb); if (sa->aad.type == AUDIT_APPARMOR_KILL) (void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current); if (sa->aad.type == AUDIT_APPARMOR_ALLOWED) return complain_error(sa->aad.error); return sa->aad.error; }
gpl-2.0
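/*
 * Usage sketch, not part of the file above: how a mediation path might
 * report a result through aa_audit() with AUDIT_APPARMOR_AUTO, which the
 * switch at the top of aa_audit() resolves to AUDIT/ALLOWED/DENIED. The
 * COMMON_AUDIT_DATA_INIT() initializer, OP_OPEN value, and this function
 * are assumptions about the surrounding tree, for illustration only.
 */
static int example_report(struct aa_profile *profile, int error)
{
	struct common_audit_data sa;

	COMMON_AUDIT_DATA_INIT(&sa, NONE);
	sa.aad.op = OP_OPEN;	/* index into op_table[] above */
	sa.aad.error = error;	/* 0 -> audited only in AUDIT_ALL mode */
	return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa, NULL);
}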
Split-Screen/android_kernel_samsung_manta
arch/ia64/sn/kernel/io_init.c
6663
10655
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved. */ #include <linux/slab.h> #include <linux/export.h> #include <asm/sn/types.h> #include <asm/sn/addrs.h> #include <asm/sn/io.h> #include <asm/sn/module.h> #include <asm/sn/intr.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/sn_sal.h> #include "xtalk/hubdev.h" /* * The code in this file will only be executed when running with * a PROM that does _not_ have base ACPI IO support. * (i.e., SN_ACPI_BASE_SUPPORT() == 0) */ static int max_segment_number; /* Default highest segment number */ static int max_pcibus_number = 255; /* Default highest pci bus number */ /* * Retrieve the hub device info structure for the given nasid. */ static inline u64 sal_get_hubdev_info(u64 handle, u64 address) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_GET_HUBDEV_INFO, (u64) handle, (u64) address, 0, 0, 0, 0, 0); return ret_stuff.v0; } /* * Retrieve the pci bus information given the bus number. */ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_GET_PCIBUS_INFO, (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0); return ret_stuff.v0; } /* * Retrieve the pci device information given the bus and device|function number. */ static inline u64 sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, u64 sn_irq_info) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_GET_PCIDEV_INFO, (u64) segment, (u64) bus_number, (u64) devfn, (u64) pci_dev, sn_irq_info, 0, 0); return ret_stuff.v0; } /* * sn_fixup_ionodes() - This routine initializes the HUB data structure for * each node in the system. This function is only * executed when running with a non-ACPI capable PROM. */ static void __init sn_fixup_ionodes(void) { struct hubdev_info *hubdev; u64 status; u64 nasid; int i; extern void sn_common_hubdev_init(struct hubdev_info *); /* * Get SGI Specific HUB chipset information. * Inform Prom that this kernel can support domain bus numbering. */ for (i = 0; i < num_cnodes; i++) { hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo); nasid = cnodeid_to_nasid(i); hubdev->max_segment_number = 0xffffffff; hubdev->max_pcibus_number = 0xff; status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev)); if (status) continue; /* Save the largest Domain and pcibus numbers found. */ if (hubdev->max_segment_number) { /* * Dealing with a Prom that supports segments. */ max_segment_number = hubdev->max_segment_number; max_pcibus_number = hubdev->max_pcibus_number; } sn_common_hubdev_init(hubdev); } } /* * sn_pci_legacy_window_fixup - Create PCI controller windows for * legacy IO and MEM space. 
This needs to * be done here, as the PROM does not have * ACPI support defining the root buses * and their resources (_CRS), */ static void sn_legacy_pci_window_fixup(struct pci_controller *controller, u64 legacy_io, u64 legacy_mem) { controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL); BUG_ON(controller->window == NULL); controller->window[0].offset = legacy_io; controller->window[0].resource.name = "legacy_io"; controller->window[0].resource.flags = IORESOURCE_IO; controller->window[0].resource.start = legacy_io; controller->window[0].resource.end = controller->window[0].resource.start + 0xffff; controller->window[0].resource.parent = &ioport_resource; controller->window[1].offset = legacy_mem; controller->window[1].resource.name = "legacy_mem"; controller->window[1].resource.flags = IORESOURCE_MEM; controller->window[1].resource.start = legacy_mem; controller->window[1].resource.end = controller->window[1].resource.start + (1024 * 1024) - 1; controller->window[1].resource.parent = &iomem_resource; controller->windows = 2; } /* * sn_pci_window_fixup() - Create a pci_window for each device resource. * It will setup pci_windows for use by * pcibios_bus_to_resource(), pcibios_resource_to_bus(), * etc. */ static void sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, s64 * pci_addrs) { struct pci_controller *controller = PCI_CONTROLLER(dev->bus); unsigned int i; unsigned int idx; unsigned int new_count; struct pci_window *new_window; if (count == 0) return; idx = controller->windows; new_count = controller->windows + count; new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); BUG_ON(new_window == NULL); if (controller->window) { memcpy(new_window, controller->window, sizeof(struct pci_window) * controller->windows); kfree(controller->window); } /* Setup a pci_window for each device resource. */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) { if (pci_addrs[i] == -1) continue; new_window[idx].offset = dev->resource[i].start - pci_addrs[i]; new_window[idx].resource = dev->resource[i]; idx++; } controller->windows = new_count; controller->window = new_window; } /* * sn_io_slot_fixup() - We are not running with an ACPI capable PROM, * and need to convert the pci_dev->resource * 'start' and 'end' addresses to mapped addresses, * and setup the pci_controller->window array entries. */ void sn_io_slot_fixup(struct pci_dev *dev) { unsigned int count = 0; int idx; s64 pci_addrs[PCI_ROM_RESOURCE + 1]; unsigned long addr, end, size, start; struct pcidev_info *pcidev_info; struct sn_irq_info *sn_irq_info; int status; pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); if (!pcidev_info) panic("%s: Unable to alloc memory for pcidev_info", __func__); sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); if (!sn_irq_info) panic("%s: Unable to alloc memory for sn_irq_info", __func__); /* Call to retrieve pci device information needed by kernel. 
*/ status = sal_get_pcidev_info((u64) pci_domain_nr(dev), (u64) dev->bus->number, dev->devfn, (u64) __pa(pcidev_info), (u64) __pa(sn_irq_info)); BUG_ON(status); /* Cannot get platform pci device information */ /* Copy over PIO Mapped Addresses */ for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { if (!pcidev_info->pdi_pio_mapped_addr[idx]) { pci_addrs[idx] = -1; continue; } start = dev->resource[idx].start; end = dev->resource[idx].end; size = end - start; if (size == 0) { pci_addrs[idx] = -1; continue; } pci_addrs[idx] = start; count++; addr = pcidev_info->pdi_pio_mapped_addr[idx]; addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET; dev->resource[idx].start = addr; dev->resource[idx].end = addr + size; /* * if it's already in the device structure, remove it before * inserting */ if (dev->resource[idx].parent && dev->resource[idx].parent->child) release_resource(&dev->resource[idx]); if (dev->resource[idx].flags & IORESOURCE_IO) insert_resource(&ioport_resource, &dev->resource[idx]); else insert_resource(&iomem_resource, &dev->resource[idx]); /* * If ROM, set the actual ROM image size, and mark as * shadowed in PROM. */ if (idx == PCI_ROM_RESOURCE) { size_t image_size; void __iomem *rom; rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE), size + 1); image_size = pci_get_rom_size(dev, rom, size + 1); dev->resource[PCI_ROM_RESOURCE].end = dev->resource[PCI_ROM_RESOURCE].start + image_size - 1; dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY; } } /* Create a pci_window in the pci_controller struct for * each device resource. */ if (count > 0) sn_pci_window_fixup(dev, count, pci_addrs); sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info); } EXPORT_SYMBOL(sn_io_slot_fixup); /* * sn_pci_controller_fixup() - This routine sets up a bus's resources * consistent with the Linux PCI abstraction layer. */ static void __init sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) { s64 status = 0; struct pci_controller *controller; struct pcibus_bussoft *prom_bussoft_ptr; LIST_HEAD(resources); int i; status = sal_get_pcibus_info((u64) segment, (u64) busnum, (u64) ia64_tpa(&prom_bussoft_ptr)); if (status > 0) return; /*bus # does not exist */ prom_bussoft_ptr = __va(prom_bussoft_ptr); controller = kzalloc(sizeof(*controller), GFP_KERNEL); BUG_ON(!controller); controller->segment = segment; /* * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup(). 
* (platform_data will be overwritten later in sn_common_bus_fixup()) */ controller->platform_data = prom_bussoft_ptr; sn_legacy_pci_window_fixup(controller, prom_bussoft_ptr->bs_legacy_io, prom_bussoft_ptr->bs_legacy_mem); for (i = 0; i < controller->windows; i++) pci_add_resource_offset(&resources, &controller->window[i].resource, controller->window[i].offset); bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller, &resources); if (bus == NULL) goto error_return; /* error, or bus already scanned */ bus->sysdata = controller; return; error_return: kfree(controller); return; } /* * sn_bus_fixup */ void sn_bus_fixup(struct pci_bus *bus) { struct pci_dev *pci_dev = NULL; struct pcibus_bussoft *prom_bussoft_ptr; if (!bus->parent) { /* If root bus */ prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data; if (prom_bussoft_ptr == NULL) { printk(KERN_ERR "sn_bus_fixup: 0x%04x:0x%02x Unable to " "obtain prom_bussoft_ptr\n", pci_domain_nr(bus), bus->number); return; } sn_common_bus_fixup(bus, prom_bussoft_ptr); } list_for_each_entry(pci_dev, &bus->devices, bus_list) { sn_io_slot_fixup(pci_dev); } } /* * sn_io_init - PROM does not have ACPI support to define nodes or root buses, * so we need to do things the hard way, including initiating the * bus scanning ourselves. */ void __init sn_io_init(void) { int i, j; sn_fixup_ionodes(); /* busses are not known yet ... */ for (i = 0; i <= max_segment_number; i++) for (j = 0; j <= max_pcibus_number; j++) sn_pci_controller_fixup(i, j, NULL); }
gpl-2.0
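/*
 * Worked example, not part of the file above: the pci_window offset
 * arithmetic set up in sn_pci_window_fixup() there. Each window stores
 * offset = mapped_cpu_addr - pci_bus_addr, so converting a mapped
 * address back to a bus address is a subtraction. This helper is a
 * hypothetical illustration of that invariant.
 */
static u64 example_window_to_bus(struct pci_window *w, u64 cpu_addr)
{
	/* inverse of: dev->resource[i].start = pci_addr + offset */
	return cpu_addr - w->offset;
}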
h0tw1r3/kernel_samsung_sghi717
fs/afs/server.c
8199
8083
/* AFS server record management * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/slab.h> #include "internal.h" static unsigned afs_server_timeout = 10; /* server timeout in seconds */ static void afs_reap_server(struct work_struct *); /* tree of all the servers, indexed by IP address */ static struct rb_root afs_servers = RB_ROOT; static DEFINE_RWLOCK(afs_servers_lock); /* LRU list of all the servers not currently in use */ static LIST_HEAD(afs_server_graveyard); static DEFINE_SPINLOCK(afs_server_graveyard_lock); static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server); /* * install a server record in the master tree */ static int afs_install_server(struct afs_server *server) { struct afs_server *xserver; struct rb_node **pp, *p; int ret; _enter("%p", server); write_lock(&afs_servers_lock); ret = -EEXIST; pp = &afs_servers.rb_node; p = NULL; while (*pp) { p = *pp; _debug("- consider %p", p); xserver = rb_entry(p, struct afs_server, master_rb); if (server->addr.s_addr < xserver->addr.s_addr) pp = &(*pp)->rb_left; else if (server->addr.s_addr > xserver->addr.s_addr) pp = &(*pp)->rb_right; else goto error; } rb_link_node(&server->master_rb, p, pp); rb_insert_color(&server->master_rb, &afs_servers); ret = 0; error: write_unlock(&afs_servers_lock); return ret; } /* * allocate a new server record */ static struct afs_server *afs_alloc_server(struct afs_cell *cell, const struct in_addr *addr) { struct afs_server *server; _enter(""); server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); if (server) { atomic_set(&server->usage, 1); server->cell = cell; INIT_LIST_HEAD(&server->link); INIT_LIST_HEAD(&server->grave); init_rwsem(&server->sem); spin_lock_init(&server->fs_lock); server->fs_vnodes = RB_ROOT; server->cb_promises = RB_ROOT; spin_lock_init(&server->cb_lock); init_waitqueue_head(&server->cb_break_waitq); INIT_DELAYED_WORK(&server->cb_break_work, afs_dispatch_give_up_callbacks); memcpy(&server->addr, addr, sizeof(struct in_addr)); server->addr.s_addr = addr->s_addr; _leave(" = %p{%d}", server, atomic_read(&server->usage)); } else { _leave(" = NULL [nomem]"); } return server; } /* * get an FS-server record for a cell */ struct afs_server *afs_lookup_server(struct afs_cell *cell, const struct in_addr *addr) { struct afs_server *server, *candidate; _enter("%p,%pI4", cell, &addr->s_addr); /* quick scan of the list to see if we already have the server */ read_lock(&cell->servers_lock); list_for_each_entry(server, &cell->servers, link) { if (server->addr.s_addr == addr->s_addr) goto found_server_quickly; } read_unlock(&cell->servers_lock); candidate = afs_alloc_server(cell, addr); if (!candidate) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } write_lock(&cell->servers_lock); /* check the cell's server list again */ list_for_each_entry(server, &cell->servers, link) { if (server->addr.s_addr == addr->s_addr) goto found_server; } _debug("new"); server = candidate; if (afs_install_server(server) < 0) goto server_in_two_cells; afs_get_cell(cell); list_add_tail(&server->link, &cell->servers); write_unlock(&cell->servers_lock); _leave(" = %p{%d}", server, atomic_read(&server->usage)); return server; /* found a matching server quickly */ 
found_server_quickly: _debug("found quickly"); afs_get_server(server); read_unlock(&cell->servers_lock); no_longer_unused: if (!list_empty(&server->grave)) { spin_lock(&afs_server_graveyard_lock); list_del_init(&server->grave); spin_unlock(&afs_server_graveyard_lock); } _leave(" = %p{%d}", server, atomic_read(&server->usage)); return server; /* found a matching server on the second pass */ found_server: _debug("found"); afs_get_server(server); write_unlock(&cell->servers_lock); kfree(candidate); goto no_longer_unused; /* found a server that seems to be in two cells */ server_in_two_cells: write_unlock(&cell->servers_lock); kfree(candidate); printk(KERN_NOTICE "kAFS: Server %pI4 appears to be in two cells\n", addr); _leave(" = -EEXIST"); return ERR_PTR(-EEXIST); } /* * look up a server by its IP address */ struct afs_server *afs_find_server(const struct in_addr *_addr) { struct afs_server *server = NULL; struct rb_node *p; struct in_addr addr = *_addr; _enter("%pI4", &addr.s_addr); read_lock(&afs_servers_lock); p = afs_servers.rb_node; while (p) { server = rb_entry(p, struct afs_server, master_rb); _debug("- consider %p", p); if (addr.s_addr < server->addr.s_addr) { p = p->rb_left; } else if (addr.s_addr > server->addr.s_addr) { p = p->rb_right; } else { afs_get_server(server); goto found; } } server = NULL; found: read_unlock(&afs_servers_lock); ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr); _leave(" = %p", server); return server; } /* * destroy a server record * - removes from the cell list */ void afs_put_server(struct afs_server *server) { if (!server) return; _enter("%p{%d}", server, atomic_read(&server->usage)); _debug("PUT SERVER %d", atomic_read(&server->usage)); ASSERTCMP(atomic_read(&server->usage), >, 0); if (likely(!atomic_dec_and_test(&server->usage))) { _leave(""); return; } afs_flush_callback_breaks(server); spin_lock(&afs_server_graveyard_lock); if (atomic_read(&server->usage) == 0) { list_move_tail(&server->grave, &afs_server_graveyard); server->time_of_death = get_seconds(); queue_delayed_work(afs_wq, &afs_server_reaper, afs_server_timeout * HZ); } spin_unlock(&afs_server_graveyard_lock); _leave(" [dead]"); } /* * destroy a dead server */ static void afs_destroy_server(struct afs_server *server) { _enter("%p", server); ASSERTIF(server->cb_break_head != server->cb_break_tail, delayed_work_pending(&server->cb_break_work)); ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL); ASSERTCMP(server->cb_promises.rb_node, ==, NULL); ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail); ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0); afs_put_cell(server->cell); kfree(server); } /* * reap dead server records */ static void afs_reap_server(struct work_struct *work) { LIST_HEAD(corpses); struct afs_server *server; unsigned long delay, expiry; time_t now; now = get_seconds(); spin_lock(&afs_server_graveyard_lock); while (!list_empty(&afs_server_graveyard)) { server = list_entry(afs_server_graveyard.next, struct afs_server, grave); /* the queue is ordered most dead first */ expiry = server->time_of_death + afs_server_timeout; if (expiry > now) { delay = (expiry - now) * HZ; if (!queue_delayed_work(afs_wq, &afs_server_reaper, delay)) { cancel_delayed_work(&afs_server_reaper); queue_delayed_work(afs_wq, &afs_server_reaper, delay); } break; } write_lock(&server->cell->servers_lock); write_lock(&afs_servers_lock); if (atomic_read(&server->usage) > 0) { list_del_init(&server->grave); } else { list_move_tail(&server->grave, &corpses); list_del_init(&server->link); 
rb_erase(&server->master_rb, &afs_servers); } write_unlock(&afs_servers_lock); write_unlock(&server->cell->servers_lock); } spin_unlock(&afs_server_graveyard_lock); /* now reap the corpses we've extracted */ while (!list_empty(&corpses)) { server = list_entry(corpses.next, struct afs_server, grave); list_del(&server->grave); afs_destroy_server(server); } } /* * discard all the server records for rmmod */ void __exit afs_purge_servers(void) { afs_server_timeout = 0; cancel_delayed_work(&afs_server_reaper); queue_delayed_work(afs_wq, &afs_server_reaper, 0); }
gpl-2.0
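/*
 * Usage sketch, not part of the file above: the lookup/put refcount
 * pattern the functions there implement. afs_lookup_server() returns a
 * referenced server (creating it on first use); afs_put_server() drops
 * the reference and parks an unused record in the graveyard for the
 * reaper. The cell, address, and this caller are hypothetical.
 */
static int example_use_server(struct afs_cell *cell, struct in_addr *addr)
{
	struct afs_server *server = afs_lookup_server(cell, addr);

	if (IS_ERR(server))
		return PTR_ERR(server);
	/* ... issue fileserver operations against server ... */
	afs_put_server(server);
	return 0;
}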
knone1/android_kernel_asus_moorefield-backup
tools/power/cpupower/lib/cpufreq.c
9991
3890
/* * (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de> * * Licensed under the terms of the GNU GPL License version 2. */ #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include "cpufreq.h" #include "sysfs.h" int cpufreq_cpu_exists(unsigned int cpu) { return sysfs_cpu_exists(cpu); } unsigned long cpufreq_get_freq_kernel(unsigned int cpu) { return sysfs_get_freq_kernel(cpu); } unsigned long cpufreq_get_freq_hardware(unsigned int cpu) { return sysfs_get_freq_hardware(cpu); } unsigned long cpufreq_get_transition_latency(unsigned int cpu) { return sysfs_get_freq_transition_latency(cpu); } int cpufreq_get_hardware_limits(unsigned int cpu, unsigned long *min, unsigned long *max) { if ((!min) || (!max)) return -EINVAL; return sysfs_get_freq_hardware_limits(cpu, min, max); } char *cpufreq_get_driver(unsigned int cpu) { return sysfs_get_freq_driver(cpu); } void cpufreq_put_driver(char *ptr) { if (!ptr) return; free(ptr); } struct cpufreq_policy *cpufreq_get_policy(unsigned int cpu) { return sysfs_get_freq_policy(cpu); } void cpufreq_put_policy(struct cpufreq_policy *policy) { if ((!policy) || (!policy->governor)) return; free(policy->governor); policy->governor = NULL; free(policy); } struct cpufreq_available_governors *cpufreq_get_available_governors(unsigned int cpu) { return sysfs_get_freq_available_governors(cpu); } void cpufreq_put_available_governors(struct cpufreq_available_governors *any) { struct cpufreq_available_governors *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; if (tmp->governor) free(tmp->governor); free(tmp); tmp = next; } } struct cpufreq_available_frequencies *cpufreq_get_available_frequencies(unsigned int cpu) { return sysfs_get_available_frequencies(cpu); } void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies *any) { struct cpufreq_available_frequencies *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; free(tmp); tmp = next; } } struct cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu) { return sysfs_get_freq_affected_cpus(cpu); } void cpufreq_put_affected_cpus(struct cpufreq_affected_cpus *any) { struct cpufreq_affected_cpus *tmp, *next; if (!any) return; tmp = any->first; while (tmp) { next = tmp->next; free(tmp); tmp = next; } } struct cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu) { return sysfs_get_freq_related_cpus(cpu); } void cpufreq_put_related_cpus(struct cpufreq_affected_cpus *any) { cpufreq_put_affected_cpus(any); } int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy) { if (!policy || !(policy->governor)) return -EINVAL; return sysfs_set_freq_policy(cpu, policy); } int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq) { return sysfs_modify_freq_policy_min(cpu, min_freq); } int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq) { return sysfs_modify_freq_policy_max(cpu, max_freq); } int cpufreq_modify_policy_governor(unsigned int cpu, char *governor) { if ((!governor) || (strlen(governor) > 19)) return -EINVAL; return sysfs_modify_freq_policy_governor(cpu, governor); } int cpufreq_set_frequency(unsigned int cpu, unsigned long target_frequency) { return sysfs_set_frequency(cpu, target_frequency); } struct cpufreq_stats *cpufreq_get_stats(unsigned int cpu, unsigned long long *total_time) { return sysfs_get_freq_stats(cpu, total_time); } void cpufreq_put_stats(struct cpufreq_stats *any) { struct cpufreq_stats *tmp, *next; if (!any) return; tmp = 
any->first; while (tmp) { next = tmp->next; free(tmp); tmp = next; } } unsigned long cpufreq_get_transitions(unsigned int cpu) { return sysfs_get_freq_transitions(cpu); }
gpl-2.0
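/*
 * Standalone usage sketch, not part of the file above: a minimal
 * userspace consumer of this library, assuming it is linked against the
 * cpupower library that provides cpufreq.h and that CPU 0 exists.
 */
#include <stdio.h>
#include "cpufreq.h"

int main(void)
{
	unsigned long freq = cpufreq_get_freq_kernel(0);
	struct cpufreq_policy *policy = cpufreq_get_policy(0);

	printf("cpu0: %lu kHz\n", freq);
	if (policy) {
		printf("governor: %s (%lu..%lu kHz)\n",
		       policy->governor, policy->min, policy->max);
		cpufreq_put_policy(policy);	/* frees governor + policy */
	}
	return 0;
}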
sdonati84/GalaxyS3_Kernel
drivers/rtc/rtc-starfire.c
10247
1745
/* rtc-starfire.c: Starfire platform RTC driver. * * Copyright (C) 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <asm/oplib.h> MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_DESCRIPTION("Starfire RTC driver"); MODULE_LICENSE("GPL"); static u32 starfire_get_time(void) { static char obp_gettod[32]; static u32 unix_tod; sprintf(obp_gettod, "h# %08x unix-gettod", (unsigned int) (long) &unix_tod); prom_feval(obp_gettod); return unix_tod; } static int starfire_read_time(struct device *dev, struct rtc_time *tm) { rtc_time_to_tm(starfire_get_time(), tm); return rtc_valid_tm(tm); } static const struct rtc_class_ops starfire_rtc_ops = { .read_time = starfire_read_time, }; static int __init starfire_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc = rtc_device_register("starfire", &pdev->dev, &starfire_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); platform_set_drvdata(pdev, rtc); return 0; } static int __exit starfire_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtc = platform_get_drvdata(pdev); rtc_device_unregister(rtc); return 0; } static struct platform_driver starfire_rtc_driver = { .driver = { .name = "rtc-starfire", .owner = THIS_MODULE, }, .remove = __exit_p(starfire_rtc_remove), }; static int __init starfire_rtc_init(void) { return platform_driver_probe(&starfire_rtc_driver, starfire_rtc_probe); } static void __exit starfire_rtc_exit(void) { platform_driver_unregister(&starfire_rtc_driver); } module_init(starfire_rtc_init); module_exit(starfire_rtc_exit);
gpl-2.0
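/*
 * Illustrative sketch, not part of the file above: the RTC core invokes
 * the read_time hook registered there; a debug reader placed alongside
 * the driver could reuse the same helper like this. This function is
 * hypothetical.
 */
static int example_read(struct device *dev)
{
	struct rtc_time tm;
	int err = starfire_read_time(dev, &tm);

	if (!err)
		pr_info("starfire rtc: %04d-%02d-%02d\n",
			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	return err;
}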
Klozz/kernel_msm_osprey-stock
arch/m68k/atari/stdma.c
10503
5111
/* * linux/arch/m68k/atari/stdma.c * * Copyright (C) 1994 Roman Hodek * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* This file contains some functions for controlling the access to the */ /* ST-DMA chip that may be shared between devices. Currently we have: */ /* TT: Floppy and ACSI bus */ /* Falcon: Floppy and SCSI */ /* */ /* The controlling functions set up a wait queue for access to the */ /* ST-DMA chip. Callers to stdma_lock() that cannot be granted access are */ /* put onto a queue and woken up later if the owner calls */ /* stdma_release(). Additionally, the caller gives its interrupt */ /* service routine to stdma_lock(). */ /* */ /* On the Falcon, the IDE bus uses just the ACSI/Floppy interrupt, but */ /* not the ST-DMA chip itself. So falhd.c need not lock the */ /* chip. The interrupt is routed to falhd.c if IDE is configured, the */ /* model is a Falcon and the interrupt was caused by the HD controller */ /* (can be determined by looking at its status register). */ #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/genhd.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/module.h> #include <asm/atari_stdma.h> #include <asm/atariints.h> #include <asm/atarihw.h> #include <asm/io.h> #include <asm/irq.h> static int stdma_locked; /* the semaphore */ /* int func to be called */ static irq_handler_t stdma_isr; static void *stdma_isr_data; /* data passed to isr */ static DECLARE_WAIT_QUEUE_HEAD(stdma_wait); /* wait queue for ST-DMA */ /***************************** Prototypes *****************************/ static irqreturn_t stdma_int (int irq, void *dummy); /************************* End of Prototypes **************************/ /* * Function: void stdma_lock( isrfunc isr, void *data ) * * Purpose: Tries to get a lock on the ST-DMA chip that is used by more * than one device driver. Waits on stdma_wait until the lock is free. * stdma_lock() may not be called from an interrupt! You have to * get the lock in your main routine and release it when your * request is finished. * * Inputs: An interrupt function that is called until the lock is * released. * * Returns: nothing * */ void stdma_lock(irq_handler_t handler, void *data) { unsigned long flags; local_irq_save(flags); /* protect lock */ /* Since the DMA is used for file system purposes, we have to sleep uninterruptibly (there may be locked buffers) */ wait_event(stdma_wait, !stdma_locked); stdma_locked = 1; stdma_isr = handler; stdma_isr_data = data; local_irq_restore(flags); } EXPORT_SYMBOL(stdma_lock); /* * Function: void stdma_release( void ) * * Purpose: Releases the lock on the ST-DMA chip. * * Inputs: none * * Returns: nothing * */ void stdma_release(void) { unsigned long flags; local_irq_save(flags); stdma_locked = 0; stdma_isr = NULL; stdma_isr_data = NULL; wake_up(&stdma_wait); local_irq_restore(flags); } EXPORT_SYMBOL(stdma_release); /* * Function: int stdma_others_waiting( void ) * * Purpose: Check if someone waits for the ST-DMA lock. * * Inputs: none * * Returns: 0 if no one is waiting, != 0 otherwise * */ int stdma_others_waiting(void) { return waitqueue_active(&stdma_wait); } EXPORT_SYMBOL(stdma_others_waiting); /* * Function: int stdma_islocked( void ) * * Purpose: Check if the ST-DMA is currently locked.
* Note: Returned status is only valid if ints are disabled while calling and * as long as they remain disabled. * If called with ints enabled, status can change only from locked to * unlocked, because ints may not lock the ST-DMA. * * Inputs: none * * Returns: != 0 if locked, 0 otherwise * */ int stdma_islocked(void) { return stdma_locked; } EXPORT_SYMBOL(stdma_islocked); /* * Function: void stdma_init( void ) * * Purpose: Initialize the ST-DMA chip access controlling. * It sets up the interrupt and its service routine. The int is registered * as slow int, client devices have to live with that (no problem * currently). * * Inputs: none * * Return: nothing * */ void __init stdma_init(void) { stdma_isr = NULL; if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) pr_err("Couldn't register ST-DMA interrupt\n"); } /* * Function: void stdma_int() * * Purpose: The interrupt routine for the ST-DMA. It calls the isr * registered by stdma_lock(). * */ static irqreturn_t stdma_int(int irq, void *dummy) { if (stdma_isr) (*stdma_isr)(irq, stdma_isr_data); return IRQ_HANDLED; }
gpl-2.0
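/*
 * Usage sketch, not part of the file above: the lock/release protocol
 * its own comments describe. A floppy/ACSI driver takes the ST-DMA lock
 * with its own interrupt routine, performs the transfer, then releases.
 * The handler, cookie, and these functions are hypothetical.
 */
static irqreturn_t example_dma_int(int irq, void *data)
{
	/* called via stdma_int() while this driver holds the lock */
	return IRQ_HANDLED;
}

static void example_transfer(void *cookie)
{
	stdma_lock(example_dma_int, cookie);	/* may sleep */
	/* ... program the ST-DMA chip, wait for example_dma_int() ... */
	stdma_release();
}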
Radium-Devices/Radium_falcon
net/irda/irlap.c
10503
34518
/********************************************************************* * * Filename: irlap.c * Version: 1.0 * Description: IrLAP implementation for Linux * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Mon Aug 4 20:40:53 1997 * Modified at: Tue Dec 14 09:26:44 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/slab.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/random.h> #include <linux/module.h> #include <linux/seq_file.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include <net/irda/irqueue.h> #include <net/irda/irlmp.h> #include <net/irda/irlmp_frame.h> #include <net/irda/irlap_frame.h> #include <net/irda/irlap.h> #include <net/irda/timer.h> #include <net/irda/qos.h> static hashbin_t *irlap = NULL; int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ; /* This is the delay of missed pf period before generating an event * to the application. The spec mandate 3 seconds, but in some cases * it's way too long. - Jean II */ int sysctl_warn_noreply_time = 3; extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb); static void __irlap_close(struct irlap_cb *self); static void irlap_init_qos_capabilities(struct irlap_cb *self, struct qos_info *qos_user); #ifdef CONFIG_IRDA_DEBUG static const char *const lap_reasons[] = { "ERROR, NOT USED", "LAP_DISC_INDICATION", "LAP_NO_RESPONSE", "LAP_RESET_INDICATION", "LAP_FOUND_NONE", "LAP_MEDIA_BUSY", "LAP_PRIMARY_CONFLICT", "ERROR, NOT USED", }; #endif /* CONFIG_IRDA_DEBUG */ int __init irlap_init(void) { /* Check if the compiler did its job properly. * May happen on some ARM configuration, check with Russell King. */ IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;); IRDA_ASSERT(sizeof(struct test_frame) == 10, ;); IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;); IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;); /* Allocate master array */ irlap = hashbin_new(HB_LOCK); if (irlap == NULL) { IRDA_ERROR("%s: can't allocate irlap hashbin!\n", __func__); return -ENOMEM; } return 0; } void irlap_cleanup(void) { IRDA_ASSERT(irlap != NULL, return;); hashbin_delete(irlap, (FREE_FUNC) __irlap_close); } /* * Function irlap_open (driver) * * Initialize IrLAP layer * */ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos, const char *hw_name) { struct irlap_cb *self; IRDA_DEBUG(4, "%s()\n", __func__); /* Initialize the irlap structure. 
*/ self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); if (self == NULL) return NULL; self->magic = LAP_MAGIC; /* Make a binding between the layers */ self->netdev = dev; self->qos_dev = qos; /* Copy hardware name */ if(hw_name != NULL) { strlcpy(self->hw_name, hw_name, sizeof(self->hw_name)); } else { self->hw_name[0] = '\0'; } /* FIXME: should we get our own field? */ dev->atalk_ptr = self; self->state = LAP_OFFLINE; /* Initialize transmit queue */ skb_queue_head_init(&self->txq); skb_queue_head_init(&self->txq_ultra); skb_queue_head_init(&self->wx_list); /* My unique IrLAP device address! */ /* We don't want the broadcast address, neither the NULL address * (most often used to signify "invalid"), and we don't want an * address already in use (otherwise connect won't be able * to select the proper link). - Jean II */ do { get_random_bytes(&self->saddr, sizeof(self->saddr)); } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) || (hashbin_lock_find(irlap, self->saddr, NULL)) ); /* Copy to the driver */ memcpy(dev->dev_addr, &self->saddr, 4); init_timer(&self->slot_timer); init_timer(&self->query_timer); init_timer(&self->discovery_timer); init_timer(&self->final_timer); init_timer(&self->poll_timer); init_timer(&self->wd_timer); init_timer(&self->backoff_timer); init_timer(&self->media_busy_timer); irlap_apply_default_connection_parameters(self); self->N3 = 3; /* # connections attempts to try before giving up */ self->state = LAP_NDM; hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL); irlmp_register_link(self, self->saddr, &self->notify); return self; } EXPORT_SYMBOL(irlap_open); /* * Function __irlap_close (self) * * Remove IrLAP and all allocated memory. Stop any pending timers. * */ static void __irlap_close(struct irlap_cb *self) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Stop timers */ del_timer(&self->slot_timer); del_timer(&self->query_timer); del_timer(&self->discovery_timer); del_timer(&self->final_timer); del_timer(&self->poll_timer); del_timer(&self->wd_timer); del_timer(&self->backoff_timer); del_timer(&self->media_busy_timer); irlap_flush_all_queues(self); self->magic = 0; kfree(self); } /* * Function irlap_close (self) * * Remove IrLAP instance * */ void irlap_close(struct irlap_cb *self) { struct irlap_cb *lap; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* We used to send a LAP_DISC_INDICATION here, but this was * racy. This has been move within irlmp_unregister_link() * itself. Jean II */ /* Kill the LAP and all LSAPs on top of it */ irlmp_unregister_link(self->saddr); self->notify.instance = NULL; /* Be sure that we manage to remove ourself from the hash */ lap = hashbin_remove(irlap, self->saddr, NULL); if (!lap) { IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__); return; } __irlap_close(lap); } EXPORT_SYMBOL(irlap_close); /* * Function irlap_connect_indication (self, skb) * * Another device is attempting to make a connection * */ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_init_qos_capabilities(self, NULL); /* No user QoS! 
*/ irlmp_link_connect_indication(self->notify.instance, self->saddr, self->daddr, &self->qos_tx, skb); } /* * Function irlap_connect_response (self, skb) * * Service user has accepted incoming connection * */ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) { IRDA_DEBUG(4, "%s()\n", __func__); irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); } /* * Function irlap_connect_request (self, daddr, qos_user, sniff) * * Request a connection with another device; sniffing is not implemented * yet. * */ void irlap_connect_request(struct irlap_cb *self, __u32 daddr, struct qos_info *qos_user, int sniff) { IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); self->daddr = daddr; /* * If the service user specifies QoS values for this connection, * then use them */ irlap_init_qos_capabilities(self, qos_user); if ((self->state == LAP_NDM) && !self->media_busy) irlap_do_event(self, CONNECT_REQUEST, NULL, NULL); else self->connect_pending = TRUE; } /* * Function irlap_connect_confirm (self, skb) * * Connection request has been accepted * */ void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb); } /* * Function irlap_data_indication (self, skb) * * Received data frames from the IR port, so we just pass them up to * IrLMP for further processing * */ void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb, int unreliable) { /* Hide LAP header from IrLMP layer */ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); irlmp_link_data_indication(self->notify.instance, skb, unreliable); } /* * Function irlap_data_request (self, skb) * * Queue data for transmission, must wait until XMIT state * */ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb, int unreliable) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); /* * Must set frame format now so that the rest of the code knows * if it's dealing with an I or a UI frame */ if (unreliable) skb->data[1] = UI_FRAME; else skb->data[1] = I_FRAME; /* Don't forget to refcount it - see irlmp_connect_request(). */ skb_get(skb); /* Add at the end of the queue (keep ordering) - Jean II */ skb_queue_tail(&self->txq, skb); /* * Send an event for this frame only if we are in the right state * FIXME: udata should be sent first! (skb_queue_head?) */ if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) { /* If we are not already processing the Tx queue, trigger * transmission immediately - Jean II */ if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy)) irlap_do_event(self, DATA_REQUEST, skb, NULL); /* Otherwise, the packets will be sent normally at the * next pf-poll - Jean II */ } } /* * Function irlap_unitdata_request (self, skb) * * Send Ultra data. 
This is data that must be sent outside any connection * */ #ifdef CONFIG_IRDA_ULTRA void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); skb->data[0] = CBROADCAST; skb->data[1] = UI_FRAME; /* Don't need to refcount, see irlmp_connless_data_request() */ skb_queue_tail(&self->txq_ultra, skb); irlap_do_event(self, SEND_UI_FRAME, NULL, NULL); } #endif /*CONFIG_IRDA_ULTRA */ /* * Function irlap_unitdata_indication (self, skb) * * Receive Ultra data. This is data that is received outside any connection * */ #ifdef CONFIG_IRDA_ULTRA void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* Hide LAP header from IrLMP layer */ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); irlmp_link_unitdata_indication(self->notify.instance, skb); } #endif /* CONFIG_IRDA_ULTRA */ /* * Function irlap_disconnect_request (void) * * Request by the service user to disconnect the connection */ void irlap_disconnect_request(struct irlap_cb *self) { IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Don't disconnect until all data frames are successfully sent */ if (!skb_queue_empty(&self->txq)) { self->disconnect_pending = TRUE; return; } /* Check if we are in the right state for disconnecting */ switch (self->state) { case LAP_XMIT_P: /* FALLTHROUGH */ case LAP_XMIT_S: /* FALLTHROUGH */ case LAP_CONN: /* FALLTHROUGH */ case LAP_RESET_WAIT: /* FALLTHROUGH */ case LAP_RESET_CHECK: irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); break; default: IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__); self->disconnect_pending = TRUE; break; } } /* * Function irlap_disconnect_indication (void) * * Disconnect request from the other device * */ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) { IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Flush queues */ irlap_flush_all_queues(self); switch (reason) { case LAP_RESET_INDICATION: IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); irlap_do_event(self, RESET_REQUEST, NULL, NULL); break; case LAP_NO_RESPONSE: /* FALLTHROUGH */ case LAP_DISC_INDICATION: /* FALLTHROUGH */ case LAP_FOUND_NONE: /* FALLTHROUGH */ case LAP_MEDIA_BUSY: irlmp_link_disconnect_indication(self->notify.instance, self, reason, NULL); break; default: IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason); } } /* * Function irlap_discovery_request (gen_addr_bit) * * Start one single discovery operation. 
* */ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) { struct irlap_info info; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots); IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || (discovery->nslots == 8) || (discovery->nslots == 16), return;); /* Discovery is only possible in NDM mode */ if (self->state != LAP_NDM) { IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", __func__); irlap_discovery_confirm(self, NULL); /* Note : in theory, if we are not in NDM, we could postpone * the discovery like we do for connection requests. * In practice, it's not worth it. If the media was busy, * it's likely next time around it won't be busy. If we are * in REPLY state, we will get passive discovery info & event. * Jean II */ return; } /* Check if the last discovery request finished in time, or if * it was aborted due to the media busy flag. */ if (self->discovery_log != NULL) { hashbin_delete(self->discovery_log, (FREE_FUNC) kfree); self->discovery_log = NULL; } /* All operations will occur at a predictable time, no need to lock */ self->discovery_log = hashbin_new(HB_NOLOCK); if (self->discovery_log == NULL) { IRDA_WARNING("%s(), Unable to allocate discovery log!\n", __func__); return; } info.S = discovery->nslots; /* Number of slots */ info.s = 0; /* Current slot */ self->discovery_cmd = discovery; info.discovery = discovery; /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */ self->slot_timeout = sysctl_slot_timeout * HZ / 1000; irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info); } /* * Function irlap_discovery_confirm (log) * * A device has been discovered in front of this station; we * report it directly to LMP. */ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(self->notify.instance != NULL, return;); /* * Check for successful discovery, since we are then allowed to clear * the media busy condition (IrLAP 6.13.4 - p.94). This should allow * us to make connection attempts much faster and easier (i.e. no * collisions). * Setting media busy to false will also generate an event allowing * pending events in the NDM state machine to be processed. * Note : the spec doesn't define what a successful discovery is. * If we want Ultra to work, it's successful even if there is * nobody discovered - Jean II */ if (discovery_log) irda_device_set_media_busy(self->netdev, FALSE); /* Inform IrLMP */ irlmp_link_discovery_confirm(self->notify.instance, discovery_log); } /* * Function irlap_discovery_indication (log) * * Somebody is trying to discover us! * */ void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); IRDA_ASSERT(self->notify.instance != NULL, return;); /* A device is very likely to connect immediately after it performs * a successful discovery. This means that in our case, we are much * more likely to receive a connection request over the medium. * So, we backoff to avoid collisions. * IrLAP spec 6.13.4 suggests 100ms... * Note : this little trick actually makes a *BIG* difference. 
If I set * my Linux box with discovery enabled and one Ultra frame sent every * second, my Palm has no trouble connecting to it every time ! * Jean II */ irda_device_set_media_busy(self->netdev, SMALL); irlmp_link_discovery_indication(self->notify.instance, discovery); } /* * Function irlap_status_indication (quality_of_link) */ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) { switch (quality_of_link) { case STATUS_NO_ACTIVITY: IRDA_MESSAGE("IrLAP, no activity on link!\n"); break; case STATUS_NOISY: IRDA_MESSAGE("IrLAP, noisy link!\n"); break; default: break; } irlmp_status_indication(self->notify.instance, quality_of_link, LOCK_NO_CHANGE); } /* * Function irlap_reset_indication (void) */ void irlap_reset_indication(struct irlap_cb *self) { IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); if (self->state == LAP_RESET_WAIT) irlap_do_event(self, RESET_REQUEST, NULL, NULL); else irlap_do_event(self, RESET_RESPONSE, NULL, NULL); } /* * Function irlap_reset_confirm (void) */ void irlap_reset_confirm(void) { IRDA_DEBUG(1, "%s()\n", __func__); } /* * Function irlap_generate_rand_time_slot (S, s) * * Generate a random time slot between s and S-1 where * S = Number of slots (0 -> S-1) * s = Current slot */ int irlap_generate_rand_time_slot(int S, int s) { static unsigned int rand; /* unsigned, so the modulo below can never go negative */ int slot; IRDA_ASSERT((S - s) > 0, return 0;); rand += jiffies; rand ^= (rand << 12); rand ^= (rand >> 20); slot = s + rand % (S-s); IRDA_ASSERT((slot >= s) && (slot < S), return 0;); return slot; } /* * Function irlap_update_nr_received (nr) * * Remove all acknowledged frames from the current window queue. This code is * not intuitive and you should not try to change it. If you think it * contains bugs, please mail a patch to the author instead. */ void irlap_update_nr_received(struct irlap_cb *self, int nr) { struct sk_buff *skb = NULL; int count = 0; /* * Remove all the ack-ed frames from the window queue. */ /* * Optimize for the common case. It is most likely that the receiver * will acknowledge all the frames we have sent! So in that case we * delete all frames stored in the window. */ if (nr == self->vs) { while ((skb = skb_dequeue(&self->wx_list)) != NULL) { dev_kfree_skb(skb); } /* The last acked frame is the next to send minus one */ self->va = nr - 1; } else { /* Remove all acknowledged frames in the current window */ while ((skb_peek(&self->wx_list) != NULL) && (((self->va+1) % 8) != nr)) { skb = skb_dequeue(&self->wx_list); dev_kfree_skb(skb); self->va = (self->va + 1) % 8; count++; } } /* Advance window */ self->window = self->window_size - skb_queue_len(&self->wx_list); } /* * Function irlap_validate_ns_received (ns) * * Validate the next to send (ns) field from the received frame. */ int irlap_validate_ns_received(struct irlap_cb *self, int ns) { /* ns as expected? */ if (ns == self->vr) return NS_EXPECTED; /* * Stations are allowed to treat invalid NS as unexpected NS * IrLAP, Recv ... with-invalid-Ns. p. 84 */ return NS_UNEXPECTED; /* return NR_INVALID; */ } /* * Function irlap_validate_nr_received (nr) * * Validate the next to receive (nr) field from the received frame. * */ int irlap_validate_nr_received(struct irlap_cb *self, int nr) { /* nr as expected? */ if (nr == self->vs) { IRDA_DEBUG(4, "%s(), expected!\n", __func__); return NR_EXPECTED; } /* * Unexpected nr? (but within the current window) - first we check whether * the sequence numbers of the frames in the current window wrap. 
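 * Editor's illustration: with 3-bit sequence numbers, va = 6 and vs = 1 wrap
 * the window; nr == 1 is NR_EXPECTED (caught above), nr in {6, 7, 0} is
 * NR_UNEXPECTED, and anything else is NR_INVALID.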
*/ if (self->va < self->vs) { if ((nr >= self->va) && (nr <= self->vs)) return NR_UNEXPECTED; } else { if ((nr >= self->va) || (nr <= self->vs)) return NR_UNEXPECTED; } /* Invalid nr! */ return NR_INVALID; } /* * Function irlap_initiate_connection_state () * * Initialize the connection state parameters * */ void irlap_initiate_connection_state(struct irlap_cb *self) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Next to send and next to receive */ self->vs = self->vr = 0; /* Last frame which got acked (0 - 1) % 8 */ self->va = 7; self->window = 1; self->remote_busy = FALSE; self->retry_count = 0; } /* * Function irlap_wait_min_turn_around (self, qos) * * Wait the negotiated minimum turn-around time; this function actually sets * the number of XBOFs that must be sent before the next transmitted * frame in order to delay for the specified amount of time. This is * done to avoid using timers, and the forbidden udelay! */ void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos) { __u32 min_turn_time; __u32 speed; /* Get QoS values. */ speed = qos->baud_rate.value; min_turn_time = qos->min_turn_time.value; /* No need to calculate XBOFs for speeds over 115200 bps */ if (speed > 115200) { self->mtt_required = min_turn_time; return; } /* * Send additional BOF's for the next frame for the requested * min turn time, so now we must calculate how many chars (XBOF's) we * must send for the requested time period (min turn time) */ self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time); } /* * Function irlap_flush_all_queues (void) * * Flush all queues * */ void irlap_flush_all_queues(struct irlap_cb *self) { struct sk_buff* skb; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Free transmission queue */ while ((skb = skb_dequeue(&self->txq)) != NULL) dev_kfree_skb(skb); while ((skb = skb_dequeue(&self->txq_ultra)) != NULL) dev_kfree_skb(skb); /* Free sliding window buffered packets */ while ((skb = skb_dequeue(&self->wx_list)) != NULL) dev_kfree_skb(skb); } /* * Function irlap_change_speed (self, speed, now) * * Change the speed of the IrDA port * */ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now) { struct sk_buff *skb; IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); self->speed = speed; /* Change speed now, or just piggyback speed on frames */ if (now) { /* Send down empty frame to trigger speed change */ skb = alloc_skb(0, GFP_ATOMIC); if (skb) irlap_queue_xmit(self, skb); } } /* * Function irlap_init_qos_capabilities (self, qos) * * Initialize QoS for this IrLAP session. What we do is compute the * intersection of the QoS capabilities for the user, driver and for * IrLAP itself. Normally, IrLAP will not specify any values, but it can * be used to restrict certain values. */ static void irlap_init_qos_capabilities(struct irlap_cb *self, struct qos_info *qos_user) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(self->netdev != NULL, return;); /* Start out with the maximum QoS support possible */ irda_init_max_qos_capabilies(&self->qos_rx); /* Apply the driver's QoS capabilities */ irda_qos_compute_intersection(&self->qos_rx, self->qos_dev); /* * Check for user supplied QoS parameters. The service user is only * allowed to supply these values. 
We check each parameter since the * user may not have set all of them. */ if (qos_user) { IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__); if (qos_user->baud_rate.bits) self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; if (qos_user->max_turn_time.bits) self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits; if (qos_user->data_size.bits) self->qos_rx.data_size.bits &= qos_user->data_size.bits; if (qos_user->link_disc_time.bits) self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits; } /* Use 500ms in IrLAP for now */ self->qos_rx.max_turn_time.bits &= 0x01; /* Set data size */ /*self->qos_rx.data_size.bits &= 0x03;*/ irda_qos_bits_to_value(&self->qos_rx); } /* * Function irlap_apply_default_connection_parameters (self) * * Use the default connection and transmission parameters */ void irlap_apply_default_connection_parameters(struct irlap_cb *self) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* xbofs : Default value in NDM */ self->next_bofs = 12; self->bofs_count = 12; /* NDM Speed is 9600 */ irlap_change_speed(self, 9600, TRUE); /* Set mbusy when going to NDM state */ irda_device_set_media_busy(self->netdev, TRUE); /* * Generate a random connection address for this session, which must * be 7 bits wide and different from 0x00 and 0xfe */ while ((self->caddr == 0x00) || (self->caddr == 0xfe)) { get_random_bytes(&self->caddr, sizeof(self->caddr)); self->caddr &= 0xfe; } /* Use default values until the connection has been negotiated */ self->slot_timeout = sysctl_slot_timeout; self->final_timeout = FINAL_TIMEOUT; self->poll_timeout = POLL_TIMEOUT; self->wd_timeout = WD_TIMEOUT; /* Set some default values */ self->qos_tx.baud_rate.value = 9600; self->qos_rx.baud_rate.value = 9600; self->qos_tx.max_turn_time.value = 0; self->qos_rx.max_turn_time.value = 0; self->qos_tx.min_turn_time.value = 0; self->qos_rx.min_turn_time.value = 0; self->qos_tx.data_size.value = 64; self->qos_rx.data_size.value = 64; self->qos_tx.window_size.value = 1; self->qos_rx.window_size.value = 1; self->qos_tx.additional_bofs.value = 12; self->qos_rx.additional_bofs.value = 12; self->qos_tx.link_disc_time.value = 0; self->qos_rx.link_disc_time.value = 0; irlap_flush_all_queues(self); self->disconnect_pending = FALSE; self->connect_pending = FALSE; } /* * Function irlap_apply_connection_parameters (qos, now) * * Initialize IrLAP with the negotiated QoS values * * If 'now' is false, the speed and xbofs will be changed after the next * frame is sent. 
* If 'now' is true, the speed and xbofs are changed immediately */ void irlap_apply_connection_parameters(struct irlap_cb *self, int now) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Set the negotiated xbofs value */ self->next_bofs = self->qos_tx.additional_bofs.value; if (now) self->bofs_count = self->next_bofs; /* Set the negotiated link speed (may need the new xbofs value) */ irlap_change_speed(self, self->qos_tx.baud_rate.value, now); self->window_size = self->qos_tx.window_size.value; self->window = self->qos_tx.window_size.value; #ifdef CONFIG_IRDA_DYNAMIC_WINDOW /* * Calculate how many bytes it is possible to transmit before the * link must be turned around */ self->line_capacity = irlap_max_line_capacity(self->qos_tx.baud_rate.value, self->qos_tx.max_turn_time.value); self->bytes_left = self->line_capacity; #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Initialize timeout values, some of the rules are listed on * page 92 in IrLAP. */ IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;); IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;); /* The poll timeout applies only to the primary station. * It defines the maximum time the primary stays in XMIT mode * before timing out and turning the link around (sending a RR). * Or, this is how long we can keep the pf bit in primary mode. * Therefore, it must be less than or equal to our *OWN* max turn around. * Jean II */ self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000; /* The Final timeout applies only to the primary station. * It defines the maximum time the primary waits (mostly in RECV mode) * for an answer from the secondary station before polling it again. * Therefore, it must be greater than or equal to our *PARTNER* * max turn around time - Jean II */ self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000; /* The Watchdog Bit timeout applies only to the secondary station. * It defines the maximum time the secondary waits (mostly in RECV mode) * for a poll from the primary station before getting annoyed. * Therefore, it must be greater than or equal to our *PARTNER* * max turn around time - Jean II */ self->wd_timeout = self->final_timeout * 2; /* * N1 and N2 are the maximum retry counts for *both* the final timer * and the wd timer (with a factor 2) as defined above. * After N1 retries of a timer, we give a warning to the user. * After N2 retries, we consider the link dead and disconnect it. * Jean II */ /* * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to * 3 seconds otherwise. See page 71 in IrLAP for more details. * Actually, it's not always 3 seconds, as we allow it to be set * via sysctl... Max maxtt is 500ms, and N1 needs to be a multiple * of 2, so 1 second is the minimum we can allow. - Jean II */ if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time) /* * If we set N1 to 0, it will trigger immediately, which is * not what we want. 
What we really want is to disable it, * Jean II */ self->N1 = -2; /* Disable - Needs to be a multiple of 2 */ else self->N1 = sysctl_warn_noreply_time * 1000 / self->qos_rx.max_turn_time.value; IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1); /* Set N2 to match our own disconnect time */ self->N2 = self->qos_tx.link_disc_time.value * 1000 / self->qos_rx.max_turn_time.value; IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2); } #ifdef CONFIG_PROC_FS struct irlap_iter_state { int id; }; static void *irlap_seq_start(struct seq_file *seq, loff_t *pos) { struct irlap_iter_state *iter = seq->private; struct irlap_cb *self; /* Protect our access to the irlap instance list */ spin_lock_irq(&irlap->hb_spinlock); iter->id = 0; for (self = (struct irlap_cb *) hashbin_get_first(irlap); self; self = (struct irlap_cb *) hashbin_get_next(irlap)) { if (iter->id == *pos) break; ++iter->id; } return self; } static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct irlap_iter_state *iter = seq->private; ++*pos; ++iter->id; return (void *) hashbin_get_next(irlap); } static void irlap_seq_stop(struct seq_file *seq, void *v) { spin_unlock_irq(&irlap->hb_spinlock); } static int irlap_seq_show(struct seq_file *seq, void *v) { const struct irlap_iter_state *iter = seq->private; const struct irlap_cb *self = v; IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;); seq_printf(seq, "irlap%d ", iter->id); seq_printf(seq, "state: %s\n", irlap_state[self->state]); seq_printf(seq, " device name: %s, ", (self->netdev) ? self->netdev->name : "bug"); seq_printf(seq, "hardware name: %s\n", self->hw_name); seq_printf(seq, " caddr: %#02x, ", self->caddr); seq_printf(seq, "saddr: %#08x, ", self->saddr); seq_printf(seq, "daddr: %#08x\n", self->daddr); seq_printf(seq, " win size: %d, ", self->window_size); seq_printf(seq, "win: %d, ", self->window); #ifdef CONFIG_IRDA_DYNAMIC_WINDOW seq_printf(seq, "line capacity: %d, ", self->line_capacity); seq_printf(seq, "bytes left: %d\n", self->bytes_left); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ seq_printf(seq, " tx queue len: %d ", skb_queue_len(&self->txq)); seq_printf(seq, "win queue len: %d ", skb_queue_len(&self->wx_list)); seq_printf(seq, "rbusy: %s", self->remote_busy ? "TRUE" : "FALSE"); seq_printf(seq, " mbusy: %s\n", self->media_busy ? 
"TRUE" : "FALSE"); seq_printf(seq, " retrans: %d ", self->retry_count); seq_printf(seq, "vs: %d ", self->vs); seq_printf(seq, "vr: %d ", self->vr); seq_printf(seq, "va: %d\n", self->va); seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n"); seq_printf(seq, " tx\t%d\t", self->qos_tx.baud_rate.value); seq_printf(seq, "%d\t", self->qos_tx.max_turn_time.value); seq_printf(seq, "%d\t", self->qos_tx.data_size.value); seq_printf(seq, "%d\t", self->qos_tx.window_size.value); seq_printf(seq, "%d\t", self->qos_tx.additional_bofs.value); seq_printf(seq, "%d\t", self->qos_tx.min_turn_time.value); seq_printf(seq, "%d\t", self->qos_tx.link_disc_time.value); seq_printf(seq, "\n"); seq_printf(seq, " rx\t%d\t", self->qos_rx.baud_rate.value); seq_printf(seq, "%d\t", self->qos_rx.max_turn_time.value); seq_printf(seq, "%d\t", self->qos_rx.data_size.value); seq_printf(seq, "%d\t", self->qos_rx.window_size.value); seq_printf(seq, "%d\t", self->qos_rx.additional_bofs.value); seq_printf(seq, "%d\t", self->qos_rx.min_turn_time.value); seq_printf(seq, "%d\n", self->qos_rx.link_disc_time.value); return 0; } static const struct seq_operations irlap_seq_ops = { .start = irlap_seq_start, .next = irlap_seq_next, .stop = irlap_seq_stop, .show = irlap_seq_show, }; static int irlap_seq_open(struct inode *inode, struct file *file) { if (irlap == NULL) return -EINVAL; return seq_open_private(file, &irlap_seq_ops, sizeof(struct irlap_iter_state)); } const struct file_operations irlap_seq_fops = { .owner = THIS_MODULE, .open = irlap_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* CONFIG_PROC_FS */
gpl-2.0
D380/android_kernel_lge_msm8226
arch/m68k/atari/stdma.c
10503
5111
/* * linux/arch/m68k/atari/stdma.c * * Copyright (C) 1994 Roman Hodek * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* This file contains some functions for controlling the access to the */ /* ST-DMA chip that may be shared between devices. Currently we have: */ /* TT: Floppy and ACSI bus */ /* Falcon: Floppy and SCSI */ /* */ /* The controlling functions set up a wait queue for access to the */ /* ST-DMA chip. Callers to stdma_lock() that cannot be granted access */ /* are put onto a queue and woken up later when the owner calls */ /* stdma_release(). Additionally, the caller gives its interrupt */ /* service routine to stdma_lock(). */ /* */ /* On the Falcon, the IDE bus uses just the ACSI/Floppy interrupt, but */ /* not the ST-DMA chip itself. So falhd.c need not lock the */ /* chip. The interrupt is routed to falhd.c if IDE is configured, the */ /* model is a Falcon and the interrupt was caused by the HD controller */ /* (can be determined by looking at its status register). */ #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/genhd.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/module.h> #include <asm/atari_stdma.h> #include <asm/atariints.h> #include <asm/atarihw.h> #include <asm/io.h> #include <asm/irq.h> static int stdma_locked; /* the semaphore */ /* int func to be called */ static irq_handler_t stdma_isr; static void *stdma_isr_data; /* data passed to isr */ static DECLARE_WAIT_QUEUE_HEAD(stdma_wait); /* wait queue for ST-DMA */ /***************************** Prototypes *****************************/ static irqreturn_t stdma_int (int irq, void *dummy); /************************* End of Prototypes **************************/ /* * Function: void stdma_lock( isrfunc isr, void *data ) * * Purpose: Tries to get a lock on the ST-DMA chip that is used by more * than one device driver. Waits on stdma_wait until the lock is free. * stdma_lock() may not be called from an interrupt! You have to * get the lock in your main routine and release it when your * request is finished. * * Inputs: An interrupt function that is called until the lock is * released. * * Returns: nothing * */ void stdma_lock(irq_handler_t handler, void *data) { unsigned long flags; local_irq_save(flags); /* protect lock */ /* Since the DMA is used for file system purposes, we have to sleep uninterruptibly (there may be locked buffers) */ wait_event(stdma_wait, !stdma_locked); stdma_locked = 1; stdma_isr = handler; stdma_isr_data = data; local_irq_restore(flags); } EXPORT_SYMBOL(stdma_lock); /* * Function: void stdma_release( void ) * * Purpose: Releases the lock on the ST-DMA chip. * * Inputs: none * * Returns: nothing * */ void stdma_release(void) { unsigned long flags; local_irq_save(flags); stdma_locked = 0; stdma_isr = NULL; stdma_isr_data = NULL; wake_up(&stdma_wait); local_irq_restore(flags); } EXPORT_SYMBOL(stdma_release); /* * Function: int stdma_others_waiting( void ) * * Purpose: Check if someone waits for the ST-DMA lock. * * Inputs: none * * Returns: 0 if no one is waiting, != 0 otherwise * */ int stdma_others_waiting(void) { return waitqueue_active(&stdma_wait); } EXPORT_SYMBOL(stdma_others_waiting); /* * Function: int stdma_islocked( void ) * * Purpose: Check if the ST-DMA is currently locked. 
* Note: The returned status is only valid if ints are disabled while calling and * as long as they remain disabled. * If called with ints enabled, the status can change only from locked to * unlocked, because ints may not lock the ST-DMA. * * Inputs: none * * Returns: != 0 if locked, 0 otherwise * */ int stdma_islocked(void) { return stdma_locked; } EXPORT_SYMBOL(stdma_islocked); /* * Function: void stdma_init( void ) * * Purpose: Initialize the ST-DMA chip access control. * It sets up the interrupt and its service routine. The interrupt is registered * as a slow interrupt; client devices have to live with that (no problem * currently). * * Inputs: none * * Return: nothing * */ void __init stdma_init(void) { stdma_isr = NULL; if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED, "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) pr_err("Couldn't register ST-DMA interrupt\n"); } /* * Function: void stdma_int() * * Purpose: The interrupt routine for the ST-DMA. It calls the isr * registered by stdma_lock(). * */ static irqreturn_t stdma_int(int irq, void *dummy) { if (stdma_isr) (*stdma_isr)(irq, stdma_isr_data); return IRQ_HANDLED; }
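/*
 * Editor's sketch (illustrative, not from the original file): the usage
 * pattern the comments above describe. my_fdc_isr() and my_fdc_do_request()
 * are hypothetical client-driver code, not existing kernel functions.
 */
static irqreturn_t my_fdc_isr(int irq, void *data)
{
	/* called via stdma_int() while this driver holds the ST-DMA lock */
	return IRQ_HANDLED;
}

static void my_fdc_do_request(void *cookie)
{
	stdma_lock(my_fdc_isr, cookie);	/* may sleep until the chip is free */
	/* ... program the ST-DMA chip and wait for my_fdc_isr() to fire ... */
	stdma_release();		/* hand the chip to the next waiter */
}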
gpl-2.0
visi0nary/mediatek
mt6732/mediatek/kernel/drivers/fmradio/mt6627/pub/mt6627_fm_lib.c
8
60442
#include <linux/delay.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "osal_typedef.h" #include "stp_exp.h" #include "wmt_exp.h" #include "fm_typedef.h" #include "fm_dbg.h" #include "fm_err.h" #include "fm_interface.h" #include "fm_stdlib.h" #include "fm_patch.h" #include "fm_utils.h" #include "fm_link.h" #include "fm_config.h" #include "fm_private.h" #include "mt6627_fm_reg.h" #include "mt6627_fm.h" #include "mt6627_fm_lib.h" #include "mt6627_fm_cmd.h" #include "mt6627_fm_cust_cfg.h" //#include "mach/mt_gpio.h" extern fm_cust_cfg mt6627_fm_config; //#define MT6627_FM_PATCH_PATH "/etc/firmware/mt6627/mt6627_fm_patch.bin" //#define MT6627_FM_COEFF_PATH "/etc/firmware/mt6627/mt6627_fm_coeff.bin" //#define MT6627_FM_HWCOEFF_PATH "/etc/firmware/mt6627/mt6627_fm_hwcoeff.bin" //#define MT6627_FM_ROM_PATH "/etc/firmware/mt6627/mt6627_fm_rom.bin" static struct fm_patch_tbl mt6627_patch_tbl[5] = { {FM_ROM_V1, "/etc/firmware/mt6627/mt6627_fm_v1_patch.bin", "/etc/firmware/mt6627/mt6627_fm_v1_coeff.bin", NULL, NULL}, {FM_ROM_V2, "/etc/firmware/mt6627/mt6627_fm_v2_patch.bin", "/etc/firmware/mt6627/mt6627_fm_v2_coeff.bin", NULL, NULL}, {FM_ROM_V3, "/etc/firmware/mt6627/mt6627_fm_v3_patch.bin", "/etc/firmware/mt6627/mt6627_fm_v3_coeff.bin", NULL, NULL}, {FM_ROM_V4, "/etc/firmware/mt6627/mt6627_fm_v4_patch.bin", "/etc/firmware/mt6627/mt6627_fm_v4_coeff.bin", NULL, NULL}, {FM_ROM_V5, "/etc/firmware/mt6627/mt6627_fm_v5_patch.bin", "/etc/firmware/mt6627/mt6627_fm_v5_coeff.bin", NULL, NULL}, }; static struct fm_hw_info mt6627_hw_info = { .chip_id = 0x00006627, .eco_ver = 0x00000000, .rom_ver = 0x00000000, .patch_ver = 0x00000000, .reserve = 0x00000000, }; #define PATCH_SEG_LEN 512 static fm_u8 *cmd_buf = NULL; static struct fm_lock *cmd_buf_lock = NULL; static struct fm_callback *fm_cb_op; static struct fm_res_ctx *mt6627_res = NULL; //static fm_s32 Chip_Version = mt6627_E1; //static fm_bool rssi_th_set = fm_false; #if 0//def MTK_FM_50KHZ_SUPPORT static struct fm_fifo *cqi_fifo = NULL; #endif static fm_s32 mt6627_is_dese_chan(fm_u16 freq); static fm_bool mt6627_I2S_hopping_check(fm_u16 freq); #if 0 static fm_s32 mt6627_mcu_dese(fm_u16 freq, void *arg); static fm_s32 mt6627_gps_dese(fm_u16 freq, void *arg); static fm_s32 mt6627_I2s_Setting(fm_s32 onoff, fm_s32 mode, fm_s32 sample); #endif static fm_u16 mt6627_chan_para_get(fm_u16 freq); static fm_s32 mt6627_desense_check(fm_u16 freq,fm_s32 rssi); static fm_bool mt6627_TDD_chan_check(fm_u16 freq); static fm_s32 mt6627_soft_mute_tune(fm_u16 freq,fm_s32 *rssi,fm_bool *valid); static fm_s32 mt6627_pwron(fm_s32 data) { /*//Turn on FM on 6627 chip by WMT driver if(MTK_WCN_BOOL_FALSE == mtk_wcn_wmt_func_on(WMTDRV_TYPE_LPBK)){ WCN_DBG(FM_ALT|CHIP,"WMT turn on LPBK Fail!\n"); return -FM_ELINK; }else{ WCN_DBG(FM_ALT|CHIP,"WMT turn on LPBK OK!\n"); //return 0; }*/ if (MTK_WCN_BOOL_FALSE == mtk_wcn_wmt_func_on(WMTDRV_TYPE_FM)) { WCN_DBG(FM_ALT | CHIP, "WMT turn on FM Fail!\n"); return -FM_ELINK; } else { WCN_DBG(FM_ALT | CHIP, "WMT turn on FM OK!\n"); return 0; } } static fm_s32 mt6627_pwroff(fm_s32 data) { if (MTK_WCN_BOOL_FALSE == mtk_wcn_wmt_func_off(WMTDRV_TYPE_FM)) { WCN_DBG(FM_ALT | CHIP, "WMT turn off FM Fail!\n"); return -FM_ELINK; } else { WCN_DBG(FM_NTC | CHIP, "WMT turn off FM OK!\n"); return 0; } } static fm_s32 Delayms(fm_u32 data) { WCN_DBG(FM_DBG | CHIP, "delay %dms\n", data); msleep(data); return 0; } static fm_s32 Delayus(fm_u32 data) { WCN_DBG(FM_DBG | CHIP, "delay %dus\n", data); udelay(data); return 0; } fm_s32 
mt6627_get_read_result(struct fm_res_ctx* result) { FMR_ASSERT(result); mt6627_res = result; return 0; } static fm_s32 mt6627_read(fm_u8 addr, fm_u16 *val) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_get_reg(cmd_buf, TX_BUF_SIZE, addr); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_FSPI_RD, SW_RETRY_CNT, FSPI_RD_TIMEOUT, mt6627_get_read_result); if (!ret && mt6627_res) { *val = mt6627_res->fspi_rd; } FM_UNLOCK(cmd_buf_lock); return ret; } static fm_s32 mt6627_write(fm_u8 addr, fm_u16 val) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_set_reg(cmd_buf, TX_BUF_SIZE, addr, val); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_FSPI_WR, SW_RETRY_CNT, FSPI_WR_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); return ret; } static fm_s32 mt6627_set_bits(fm_u8 addr, fm_u16 bits, fm_u16 mask) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_set_bits_reg(cmd_buf, TX_BUF_SIZE, addr, bits,mask); ret = fm_cmd_tx(cmd_buf, pkt_size, (1<<0x11), SW_RETRY_CNT, FSPI_WR_TIMEOUT, NULL);//0x11 this opcode won't be parsed as an opcode, so set here as spcial case. FM_UNLOCK(cmd_buf_lock); return ret; } static fm_s32 mt6627_top_read(fm_u16 addr, fm_u32 *val) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_top_get_reg(cmd_buf, TX_BUF_SIZE, addr); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_CSPI_READ, SW_RETRY_CNT, FSPI_RD_TIMEOUT, mt6627_get_read_result); if (!ret && mt6627_res) { *val = mt6627_res->cspi_rd; } FM_UNLOCK(cmd_buf_lock); return ret; } static fm_s32 mt6627_top_write(fm_u16 addr, fm_u32 val) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_top_set_reg(cmd_buf, TX_BUF_SIZE, addr, val); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_CSPI_WRITE, SW_RETRY_CNT, FSPI_WR_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); return ret; } /*static fm_s32 mt6627_top_set_bits(fm_u16 addr, fm_u32 bits, fm_u32 mask) { fm_s32 ret = 0; fm_u32 val; ret = mt6627_top_read(addr, &val); if (ret) return ret; val = ((val & (mask)) | bits); ret = mt6627_top_write(addr, val); return ret; }*/ static fm_s32 mt6627_host_read(fm_u32 addr, fm_u32 *val) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_host_get_reg(cmd_buf, TX_BUF_SIZE, addr); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_HOST_READ, SW_RETRY_CNT, FSPI_RD_TIMEOUT, mt6627_get_read_result); if (!ret && mt6627_res) { *val = mt6627_res->cspi_rd; } FM_UNLOCK(cmd_buf_lock); return ret; } static fm_s32 mt6627_host_write(fm_u32 addr, fm_u32 val) { fm_s32 ret = 0; fm_u16 pkt_size; if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_host_set_reg(cmd_buf, TX_BUF_SIZE, addr, val); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_HOST_WRITE, SW_RETRY_CNT, FSPI_WR_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); return ret; } /*static fm_s32 mt6627_DSP_write(fm_u16 addr, fm_u16 val) { mt6627_write(0xE2, addr); mt6627_write(0xE3, val); mt6627_write(0xE1, 0x0002); return 0; } static fm_s32 mt6627_DSP_read(fm_u16 addr, fm_u16 *val) { fm_s32 ret=-1; mt6627_write(0xE2, addr); mt6627_write(0xE1, 0x0001); ret = mt6627_read(0xE4, val); return ret; }*/ static fm_u16 mt6627_get_chipid(void) { return 0x6627; } /* MT6627_SetAntennaType - set Antenna type * @type - 1,Short Antenna; 0, Long Antenna */ static fm_s32 mt6627_SetAntennaType(fm_s32 type) { fm_u16 dataRead; WCN_DBG(FM_DBG | CHIP, "set ana to %s\n", type ? 
"short" : "long"); mt6627_read(FM_MAIN_CG2_CTRL, &dataRead); if (type) { dataRead |= ANTENNA_TYPE; } else { dataRead &= (~ANTENNA_TYPE); } mt6627_write(FM_MAIN_CG2_CTRL, dataRead); return 0; } static fm_s32 mt6627_GetAntennaType(void) { fm_u16 dataRead; mt6627_read(FM_MAIN_CG2_CTRL, &dataRead); WCN_DBG(FM_DBG | CHIP, "get ana type: %s\n", (dataRead&ANTENNA_TYPE) ? "short" : "long"); if (dataRead&ANTENNA_TYPE) return FM_ANA_SHORT; //short antenna else return FM_ANA_LONG; //long antenna } static fm_s32 mt6627_Mute(fm_bool mute) { fm_s32 ret = 0; fm_u16 dataRead; WCN_DBG(FM_DBG | CHIP, "set %s\n", mute ? "mute" : "unmute"); //mt6627_read(FM_MAIN_CTRL, &dataRead); mt6627_read(0x9C, &dataRead); //mt6627_top_write(0x0050,0x00000007); if (mute == 1) { ret = mt6627_write(0x9C, (dataRead&0xFFFC) | 0x0003); } else { ret = mt6627_write(0x9C, (dataRead&0xFFFC)); } //mt6627_top_write(0x0050,0x0000000F); return ret; } /*static fm_s32 mt6627_set_RSSITh(fm_u16 TH_long, fm_u16 TH_short) { mt6627_write(0xE2, 0x3072); mt6627_write(0xE3, TH_long); mt6627_write(0xE1, 0x0002); Delayms(1); mt6627_write(0xE2, 0x307A); mt6627_write(0xE3, TH_short); mt6627_write(0xE1, 0x0002); WCN_DBG(FM_DBG | CHIP, "RSSI TH, long:0x%04x, short:0x%04x", TH_long, TH_short); return 0; } */ /* static fm_s32 mt6627_set_SMGTh(fm_s32 ver, fm_u16 TH_smg) { if (mt6627_E1 == ver) { mt6627_write(0xE2, 0x321E); mt6627_write(0xE3, TH_smg); mt6627_write(0xE1, 0x0002); } else { mt6627_write(0xE2, 0x3218); mt6627_write(0xE3, TH_smg); mt6627_write(0xE1, 0x0002); } WCN_DBG(FM_DBG | CHIP, "Soft-mute gain TH %d\n", (int)TH_smg); return 0; } */ static fm_s32 mt6627_RampDown(void) { fm_s32 ret = 0; fm_u16 pkt_size; //fm_u16 tmp; WCN_DBG(FM_DBG | CHIP, "ramp down\n"); //pwer up sequence 0425 mt6627_top_write(0x0050,0x00000007); mt6627_set_bits(0x0F,0x0000,0xF800); mt6627_top_write(0x0050,0x0000000F); //mt6627_read(FM_MAIN_INTRMASK, &tmp); mt6627_write(FM_MAIN_INTRMASK, 0x0000); mt6627_write(FM_MAIN_EXTINTRMASK, 0x0000); if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_rampdown(cmd_buf, TX_BUF_SIZE); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_RAMPDOWN, SW_RETRY_CNT, RAMPDOWN_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ERR | CHIP, "ramp down failed\n"); } mt6627_write(FM_MAIN_EXTINTRMASK, 0x0021); mt6627_write(FM_MAIN_INTRMASK, 0x0021); return ret; } static fm_s32 mt6627_get_rom_version(void) { fm_u16 tmp; fm_s32 ret; //DSP rom code version request enable --- set 0x61 b15=1 mt6627_set_bits(0x61, 0x8000, 0x7FFF); //Release ASIP reset --- set 0x61 b1=1 mt6627_set_bits(0x61, 0x0002, 0xFFFD); //Enable ASIP power --- set 0x61 b0=0 mt6627_set_bits(0x61, 0x0000, 0xFFFE); //Wait DSP code version ready --- wait 1ms do { Delayus(1000); ret = mt6627_read(0x84, &tmp); //ret=-4 means signal got when control FM. usually get sig 9 to kill FM process. //now cancel FM power up sequence is recommended. 
if(ret) { return ret; } WCN_DBG(FM_NTC | CHIP, "0x84=%x\n",tmp); }while(tmp != 0x0001); //Get FM DSP code version --- rd 0x83[15:8] mt6627_read(0x83, &tmp); tmp = (tmp >> 8); //DSP rom code version request disable --- set 0x61 b15=0 mt6627_set_bits(0x61, 0x0000, 0x7FFF); //Reset ASIP --- set 0x61[1:0] = 1 mt6627_set_bits(0x61, 0x0001, 0xFFFC); // WCN_DBG(FM_NTC | CHIP, "ROM version: v%d\n", (fm_s32)tmp); return (fm_s32)tmp; } static fm_s32 mt6627_get_patch_path(fm_s32 ver, const fm_s8 **ppath) { fm_s32 i; fm_s32 max = sizeof(mt6627_patch_tbl) / sizeof(mt6627_patch_tbl[0]); //check if the ROM version is defined or not for (i = 0; i < max; i++) { if ((mt6627_patch_tbl[i].idx == ver) && (fm_file_exist(mt6627_patch_tbl[i].patch) == 0)) { *ppath = mt6627_patch_tbl[i].patch; WCN_DBG(FM_NTC | CHIP, "Get ROM version OK\n"); return 0; } } // the ROM version isn't defined, find a latest patch instead for (i = max; i > 0; i--) { if (fm_file_exist(mt6627_patch_tbl[i-1].patch) == 0) { *ppath = mt6627_patch_tbl[i-1].patch; WCN_DBG(FM_WAR | CHIP, "undefined ROM version\n"); return 1; } } // get path failed WCN_DBG(FM_ERR | CHIP, "No valid patch file\n"); return -FM_EPATCH; } static fm_s32 mt6627_get_coeff_path(fm_s32 ver, const fm_s8 **ppath) { fm_s32 i; fm_s32 max = sizeof(mt6627_patch_tbl) / sizeof(mt6627_patch_tbl[0]); //check if the ROM version is defined or not for (i = 0; i < max; i++) { if ((mt6627_patch_tbl[i].idx == ver) && (fm_file_exist(mt6627_patch_tbl[i].coeff) == 0)) { *ppath = mt6627_patch_tbl[i].coeff; WCN_DBG(FM_NTC | CHIP, "Get ROM version OK\n"); return 0; } } // the ROM version isn't defined, find a latest patch instead for (i = max; i > 0; i--) { if (fm_file_exist(mt6627_patch_tbl[i-1].coeff) == 0) { *ppath = mt6627_patch_tbl[i-1].coeff; WCN_DBG(FM_WAR | CHIP, "undefined ROM version\n"); return 1; } } // get path failed WCN_DBG(FM_ERR | CHIP, "No valid coeff file\n"); return -FM_EPATCH; } /* * mt6627_DspPatch - DSP download procedure * @img - source dsp bin code * @len - patch length in byte * @type - rom/patch/coefficient/hw_coefficient */ static fm_s32 mt6627_DspPatch(const fm_u8 *img, fm_s32 len, enum IMG_TYPE type) { fm_u8 seg_num; fm_u8 seg_id = 0; fm_s32 seg_len; fm_s32 ret = 0; fm_u16 pkt_size; FMR_ASSERT(img); if (len <= 0) { return -1; } seg_num = len / PATCH_SEG_LEN + 1; WCN_DBG(FM_NTC | CHIP, "binary len:%d, seg num:%d\n", len, seg_num); switch (type) { #if 0 case IMG_ROM: for (seg_id = 0; seg_id < seg_num; seg_id++) { seg_len = ((seg_id + 1) < seg_num) ? PATCH_SEG_LEN : (len % PATCH_SEG_LEN); WCN_DBG(FM_NTC | CHIP, "rom,[seg_id:%d], [seg_len:%d]\n", seg_id, seg_len); if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_rom_download(cmd_buf, TX_BUF_SIZE, seg_num, seg_id, &img[seg_id*PATCH_SEG_LEN], seg_len); WCN_DBG(FM_NTC | CHIP, "pkt_size:%d\n", (fm_s32)pkt_size); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_ROM, SW_RETRY_CNT, ROM_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_rom_download failed\n"); return ret; } } break; #endif case IMG_PATCH: for (seg_id = 0; seg_id < seg_num; seg_id++) { seg_len = ((seg_id + 1) < seg_num) ? 
PATCH_SEG_LEN : (len % PATCH_SEG_LEN); WCN_DBG(FM_NTC | CHIP, "patch,[seg_id:%d], [seg_len:%d]\n", seg_id, seg_len); if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_patch_download(cmd_buf, TX_BUF_SIZE, seg_num, seg_id, &img[seg_id*PATCH_SEG_LEN], seg_len); WCN_DBG(FM_NTC | CHIP, "pkt_size:%d\n", (fm_s32)pkt_size); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_PATCH, SW_RETRY_CNT, PATCH_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_patch_download failed\n"); return ret; } } break; #if 0 case IMG_HW_COEFFICIENT: for (seg_id = 0; seg_id < seg_num; seg_id++) { seg_len = ((seg_id + 1) < seg_num) ? PATCH_SEG_LEN : (len % PATCH_SEG_LEN); WCN_DBG(FM_NTC | CHIP, "hwcoeff,[seg_id:%d], [seg_len:%d]\n", seg_id, seg_len); if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_hwcoeff_download(cmd_buf, TX_BUF_SIZE, seg_num, seg_id, &img[seg_id*PATCH_SEG_LEN], seg_len); WCN_DBG(FM_NTC | CHIP, "pkt_size:%d\n", (fm_s32)pkt_size); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_HWCOEFF, SW_RETRY_CNT, HWCOEFF_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_hwcoeff_download failed\n"); return ret; } } break; #endif case IMG_COEFFICIENT: for (seg_id = 0; seg_id < seg_num; seg_id++) { seg_len = ((seg_id + 1) < seg_num) ? PATCH_SEG_LEN : (len % PATCH_SEG_LEN); WCN_DBG(FM_NTC | CHIP, "coeff,[seg_id:%d], [seg_len:%d]\n", seg_id, seg_len); if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_coeff_download(cmd_buf, TX_BUF_SIZE, seg_num, seg_id, &img[seg_id*PATCH_SEG_LEN], seg_len); WCN_DBG(FM_NTC | CHIP, "pkt_size:%d\n", (fm_s32)pkt_size); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_COEFF, SW_RETRY_CNT, COEFF_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_coeff_download failed\n"); return ret; } } break; default: break; } return 0; } static fm_s32 mt6627_PowerUp(fm_u16 *chip_id, fm_u16 *device_id) { #define PATCH_BUF_SIZE 4096*6 fm_s32 ret = 0; fm_u16 pkt_size; fm_u16 tmp_reg = 0; const fm_s8 *path_patch = NULL; const fm_s8 *path_coeff = NULL; //const fm_s8 *path_hwcoeff = NULL; //fm_s32 coeff_len = 0; fm_s32 patch_len = 0; fm_u8 *dsp_buf = NULL; fm_u32 reg = 0; FMR_ASSERT(chip_id); FMR_ASSERT(device_id); WCN_DBG(FM_DBG | CHIP, "pwr on seq......\n"); //Wholechip FM Power Up: step 1, set common SPI parameter ret = mt6627_host_write(0x8013000C, 0x0000801F); if (ret) { WCN_DBG(FM_ALT | CHIP, " pwrup set CSPI failed\n"); return ret; } ret = mt6627_host_read(0x80101030, &reg); ret = mt6627_host_write(0x80101030, reg|(0x1<<1)); if (ret) { WCN_DBG(FM_ALT | CHIP, " pwrup set clock failed\n"); return ret; } if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_pwrup_clock_on(cmd_buf, TX_BUF_SIZE); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_EN, SW_RETRY_CNT, EN_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_pwrup_clock_on failed\n"); return ret; } //#ifdef FM_DIGITAL_INPUT //mt6627_I2s_Setting(MT6627_I2S_ON, MT6627_I2S_MASTER, MT6627_I2S_44K); // mt_combo_audio_ctrl(COMBO_AUDIO_STATE_2); //mtk_wcn_cmb_stub_audio_ctrl((CMB_STUB_AIF_X)CMB_STUB_AIF_2); //#endif //Wholechip FM Power Up: step 2, read HW version mt6627_read(0x62, &tmp_reg); //*chip_id = tmp_reg; if((tmp_reg == 0x6625) || (tmp_reg == 0x6627)) *chip_id = 0x6627; *device_id = tmp_reg; mt6627_hw_info.chip_id = (fm_s32)tmp_reg; WCN_DBG(FM_NTC | CHIP, "chip_id:0x%04x\n", tmp_reg); if ((mt6627_hw_info.chip_id != 0x6627)&&(mt6627_hw_info.chip_id != 0x6625)) { WCN_DBG(FM_NTC | CHIP, "fm sys error, reset 
hw\n"); return (-FM_EFW); } mt6627_hw_info.eco_ver = (fm_s32)mtk_wcn_wmt_hwver_get(); WCN_DBG(FM_NTC | CHIP, "ECO version:0x%08x\n", mt6627_hw_info.eco_ver); mt6627_hw_info.eco_ver += 1; //get mt6627 DSP rom version if ((ret = mt6627_get_rom_version()) >= 0) { mt6627_hw_info.rom_ver = ret; WCN_DBG(FM_NTC | CHIP, "ROM version: v%d\n", mt6627_hw_info.rom_ver); } else { WCN_DBG(FM_ERR | CHIP, "get ROM version failed\n"); //ret=-4 means signal got when control FM. usually get sig 9 to kill FM process. //now cancel FM power up sequence is recommended. return ret; } //Wholechip FM Power Up: step 3, download patch if (!(dsp_buf = fm_vmalloc(PATCH_BUF_SIZE))) { WCN_DBG(FM_ALT | CHIP, "-ENOMEM\n"); return -ENOMEM; } ret = mt6627_get_patch_path(mt6627_hw_info.rom_ver, &path_patch); if (ret) { WCN_DBG(FM_ALT | CHIP, " mt6627_get_patch_path failed\n"); return ret; } patch_len = fm_file_read(path_patch, dsp_buf, PATCH_BUF_SIZE, 0); ret = mt6627_DspPatch((const fm_u8*)dsp_buf, patch_len, IMG_PATCH); if (ret) { WCN_DBG(FM_ALT | CHIP, " DL DSPpatch failed\n"); return ret; } ret = mt6627_get_coeff_path(mt6627_hw_info.rom_ver, &path_coeff); patch_len = fm_file_read(path_coeff, dsp_buf, PATCH_BUF_SIZE, 0); mt6627_hw_info.rom_ver += 1; tmp_reg = dsp_buf[38] | (dsp_buf[39] << 8);//to be confirmed mt6627_hw_info.patch_ver = (fm_s32)tmp_reg; WCN_DBG(FM_NTC | CHIP, "Patch version: 0x%08x\n", mt6627_hw_info.patch_ver); if (ret == 1) { dsp_buf[4] = 0x00; //if we found rom version undefined, we should disable patch dsp_buf[5] = 0x00; } ret = mt6627_DspPatch((const fm_u8*)dsp_buf, patch_len, IMG_COEFFICIENT); if (ret) { WCN_DBG(FM_ALT | CHIP, " DL DSPcoeff failed\n"); return ret; } mt6627_write(0x92, 0x0000);//? mt6627_write(0x90, 0x0040); mt6627_write(0x90, 0x0000); if (dsp_buf) { fm_vfree(dsp_buf); dsp_buf = NULL; } //Wholechip FM Power Up: step 4, FM Digital Init: fm_rgf_maincon if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_pwrup_digital_init(cmd_buf, TX_BUF_SIZE); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_EN, SW_RETRY_CNT, EN_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_pwrup_digital_init failed\n"); return ret;; } //Wholechip FM Power Up: step 5, FM RF fine tune setting if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_pwrup_fine_tune(cmd_buf, TX_BUF_SIZE); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_EN, SW_RETRY_CNT, EN_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_pwrup_fine_tune failed\n"); return ret;; } //enable connsys FM 2 wire RX mt6627_write(0x9B, 0xF9AB); mt6627_host_write(0x80101054,0x00003f35); WCN_DBG(FM_NTC | CHIP, "pwr on seq ok\n"); return ret; } static fm_s32 mt6627_PowerDown(void) { fm_s32 ret = 0; fm_u16 pkt_size; fm_u16 dataRead; fm_u32 tem; WCN_DBG(FM_DBG | CHIP, "pwr down seq\n"); /*SW work around for MCUFA issue. *if interrupt happen before doing rampdown, DSP can't switch MCUFA back well. * In case read interrupt, and clean if interrupt found before rampdown. 
*/ mt6627_read(FM_MAIN_INTR, &dataRead); if (dataRead & 0x1) { mt6627_write(FM_MAIN_INTR, dataRead);//clear status flag } //mt6627_RampDown(); //#ifdef FM_DIGITAL_INPUT // mt6627_I2s_Setting(MT6627_I2S_OFF, MT6627_I2S_SLAVE, MT6627_I2S_44K); //#endif //pwer up sequence 0425 //A0:set audio output I2X Rx mode: mt6627_host_read(0x80101054,&tem); tem = tem & 0xFFFF9FFF; mt6627_host_write(0x80101054,tem); if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_pwrdown(cmd_buf, TX_BUF_SIZE); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_EN, SW_RETRY_CNT, EN_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_pwrdown failed\n"); return ret; } //FIX_ME, disable ext interrupt mt6627_write(FM_MAIN_EXTINTRMASK, 0x00); // rssi_th_set = fm_false; return ret; } //just for dgb #if 0 static void mt6627_bt_write(fm_u32 addr, fm_u32 val) { fm_u32 tem,i=0; mt6627_host_write(0x80103020,addr); mt6627_host_write(0x80103024,val); mt6627_host_read(0x80103000,&tem); while ((tem == 4) && (i<1000)) { i++; mt6627_host_read(0x80103000,&tem); } return; } #endif static fm_bool mt6627_SetFreq(fm_u16 freq) { fm_s32 ret = 0; fm_u16 pkt_size; fm_u16 chan_para = 0; fm_u32 reg_val = 0; fm_u16 freq_reg = 0; fm_cb_op->cur_freq_set(freq); #if 0 //MCU clock adjust if need if ((ret = mt6627_mcu_dese(freq, NULL)) < 0) { WCN_DBG(FM_ERR | MAIN, "mt6627_mcu_dese FAIL:%d\n", ret); } WCN_DBG(FM_INF | MAIN, "MCU %d\n", ret); //GPS clock adjust if need if ((ret = mt6627_gps_dese(freq, NULL)) < 0) { WCN_DBG(FM_ERR | MAIN, "mt6627_gps_dese FAIL:%d\n", ret); } WCN_DBG(FM_INF | MAIN, "GPS %d\n", ret); #endif //pwer up sequence 0425 mt6627_top_write(0x0050,0x00000007); mt6627_set_bits(0x0F,0x0455,0xF800); if(mt6627_TDD_chan_check(freq)) mt6627_set_bits(0x30, 0x0008, 0xFFF3); //use TDD solution else mt6627_set_bits(0x30, 0x0000, 0xFFF3); //default use FDD solution mt6627_top_write(0x0050,0x0000000F); // if (fm_cb_op->chan_para_get) { chan_para = mt6627_chan_para_get(freq); WCN_DBG(FM_DBG | CHIP, "%d chan para = %d\n", (fm_s32)freq, (fm_s32)chan_para); // } freq_reg = freq; if (0 == fm_get_channel_space(freq_reg)) { freq_reg *= 10; } freq_reg = (freq_reg - 6400) * 2 / 10; mt6627_set_bits(0x65,freq_reg,0xFC00); mt6627_set_bits(0x65,(chan_para << 12),0x0FFF); if((mt6627_hw_info.chip_id == 0x6625) && ((mtk_wcn_wmt_chipid_query() == 0x6592) || (mtk_wcn_wmt_chipid_query() == 0x6752))) { if(mt6627_I2S_hopping_check(freq)) { //set i2s TX desense mode mt6627_set_bits(0x9C,0x80,0xFFFF); //set i2s RX desense mode mt6627_host_read(0x80101054, &reg_val); reg_val |= 0x8000; mt6627_host_write(0x80101054,reg_val); } else { mt6627_set_bits(0x9C,0x0,0xFF7F); mt6627_host_read(0x80101054, &reg_val); reg_val &= 0x7FFF; mt6627_host_write(0x80101054,reg_val); } } if (FM_LOCK(cmd_buf_lock)) return fm_false; pkt_size = mt6627_tune(cmd_buf, TX_BUF_SIZE, freq, chan_para); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_TUNE | FLAG_TUNE_DONE, SW_RETRY_CNT, TUNE_TIMEOUT, NULL); FM_UNLOCK(cmd_buf_lock); if (ret) { WCN_DBG(FM_ALT | CHIP, "mt6627_tune failed\n"); return ret; } WCN_DBG(FM_DBG | CHIP, "set freq to %d ok\n", freq); #if 0 //ADPLL setting for dbg mt6627_top_write(0x0050,0x00000007); mt6627_top_write(0x0A08,0xFFFFFFFF); mt6627_bt_write(0x82,0x11); mt6627_bt_write(0x83,0x11); mt6627_bt_write(0x84,0x11); mt6627_top_write(0x0040,0x1C1C1C1C); mt6627_top_write(0x0044,0x1C1C1C1C); mt6627_write(0x70,0x0010); /*0x0806 DCO clk 0x0802 ref clk 0x0804 feedback clk */ mt6627_write(0xE0,0x0806); #endif return fm_true; } #if 0 /* * mt6627_Seek * @pFreq - 
IN/OUT parm, IN start freq/OUT seek valid freq * @seekdir - 0:up, 1:down * @space - 1:50KHz, 2:100KHz, 4:200KHz * return fm_true:seek success; fm_false:seek failed */ static fm_bool mt6627_Seek(fm_u16 min_freq, fm_u16 max_freq, fm_u16 *pFreq, fm_u16 seekdir, fm_u16 space) { fm_s32 ret = 0; fm_u16 pkt_size,temp; mt6627_RampDown(); mt6627_read(FM_MAIN_CTRL, &temp); mt6627_Mute(fm_true); if (FM_LOCK(cmd_buf_lock)) return fm_false; pkt_size = mt6627_seek(cmd_buf, TX_BUF_SIZE, seekdir, space, max_freq, min_freq); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_SEEK | FLAG_SEEK_DONE, SW_RETRY_CNT, SEEK_TIMEOUT, mt6627_get_read_result); FM_UNLOCK(cmd_buf_lock); if (!ret && mt6627_res) { *pFreq = mt6627_res->seek_result; //fm_cb_op->cur_freq_set(*pFreq); } else { WCN_DBG(FM_ALT | CHIP, "mt6627_seek failed\n"); return ret; } //get the result freq WCN_DBG(FM_NTC | CHIP, "seek, result freq:%d\n", *pFreq); mt6627_RampDown(); if((temp&0x0020) == 0) { mt6627_Mute(fm_false); } return fm_true; } #endif #define FM_CQI_LOG_PATH "/mnt/sdcard/fmcqilog" static fm_s32 mt6627_full_cqi_get(fm_s32 min_freq, fm_s32 max_freq, fm_s32 space, fm_s32 cnt) { fm_s32 ret = 0; fm_u16 pkt_size; fm_u16 freq, orig_freq; fm_s32 i, j, k; fm_s32 space_val, max, min, num; struct mt6627_full_cqi *p_cqi; fm_u8 *cqi_log_title = "Freq, RSSI, PAMD, PR, FPAMD, MR, ATDC, PRX, ATDEV, SMGain, DltaRSSI\n"; fm_u8 cqi_log_buf[100] = {0}; fm_s32 pos; fm_u8 cqi_log_path[100] = {0}; WCN_DBG(FM_NTC | CHIP, "6627 cqi log start\n"); // for soft-mute tune, and get cqi freq = fm_cb_op->cur_freq_get(); if (0 == fm_get_channel_space(freq)) { freq *= 10; } // get cqi orig_freq = freq; if (0 == fm_get_channel_space(min_freq)) { min = min_freq * 10; } else { min = min_freq; } if (0 == fm_get_channel_space(max_freq)) { max = max_freq * 10; } else { max = max_freq; } if (space == 0x0001) { space_val = 5; // 50Khz } else if (space == 0x0002) { space_val = 10; // 100Khz } else if (space == 0x0004) { space_val = 20; // 200Khz } else { space_val = 10; } num = (max - min) / space_val + 1; //Eg, (8760 - 8750) / 10 + 1 = 2 for (k = 0; (10000 == orig_freq) && (0xffffffff == g_dbg_level) && (k < cnt); k++) { WCN_DBG(FM_NTC | CHIP, "cqi file:%d\n", k+1); freq = min; pos = 0; fm_memcpy(cqi_log_path, FM_CQI_LOG_PATH, strlen(FM_CQI_LOG_PATH)); sprintf(&cqi_log_path[strlen(FM_CQI_LOG_PATH)], "%d.txt", k+1); fm_file_write(cqi_log_path, cqi_log_title, strlen(cqi_log_title), &pos); for (j = 0; j < num; j++) { if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_full_cqi_req(cmd_buf, TX_BUF_SIZE, &freq, 1, 1); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_SM_TUNE, SW_RETRY_CNT, SM_TUNE_TIMEOUT, mt6627_get_read_result); FM_UNLOCK(cmd_buf_lock); if (!ret && mt6627_res) { WCN_DBG(FM_NTC | CHIP, "smt cqi size %d\n", mt6627_res->cqi[0]); p_cqi = (struct mt6627_full_cqi*)&mt6627_res->cqi[2]; for (i = 0; i < mt6627_res->cqi[1]; i++) { // just for debug WCN_DBG(FM_NTC | CHIP, "freq %d, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x\n", p_cqi[i].ch, p_cqi[i].rssi, p_cqi[i].pamd, p_cqi[i].pr, p_cqi[i].fpamd, p_cqi[i].mr, p_cqi[i].atdc, p_cqi[i].prx, p_cqi[i].atdev, p_cqi[i].smg, p_cqi[i].drssi); // format to buffer sprintf(cqi_log_buf, "%04d,%04x,%04x,%04x,%04x,%04x,%04x,%04x,%04x,%04x,%04x,\n", p_cqi[i].ch, p_cqi[i].rssi, p_cqi[i].pamd, p_cqi[i].pr, p_cqi[i].fpamd, p_cqi[i].mr, p_cqi[i].atdc, p_cqi[i].prx, p_cqi[i].atdev, p_cqi[i].smg, p_cqi[i].drssi); // write back to log file fm_file_write(cqi_log_path, cqi_log_buf, strlen(cqi_log_buf), &pos); } } else 
{ WCN_DBG(FM_ALT | CHIP, "smt get CQI failed\n"); ret = -1; } freq += space_val; } fm_cb_op->cur_freq_set(0);//avoid run too much times } WCN_DBG(FM_NTC | CHIP, "6627 cqi log done\n"); return ret; } #if 0 static fm_bool mt6627_Scan(fm_u16 min_freq, fm_u16 max_freq, fm_u16 *pFreq, fm_u16 *pScanTBL, fm_u16 *ScanTBLsize, fm_u16 scandir, fm_u16 space) { fm_s32 ret = 0; fm_u16 pkt_size,temp; fm_u16 offset = 0; fm_u16 tmp_scanTBLsize = *ScanTBLsize; if ((!pScanTBL) || (tmp_scanTBLsize == 0)) { WCN_DBG(FM_ALT | CHIP, "scan, failed:invalid scan table\n"); return fm_false; } WCN_DBG(FM_NTC | CHIP, "start freq: %d, max_freq:%d, min_freq:%d, scan BTL size:%d, scandir:%d, space:%d\n", *pFreq, max_freq, min_freq, *ScanTBLsize, scandir, space); mt6627_RampDown(); mt6627_read(FM_MAIN_CTRL, &temp); mt6627_Mute(fm_true); mt6627_full_cqi_get(min_freq, max_freq, space, 5); // normal scan if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_scan(cmd_buf, TX_BUF_SIZE, scandir, space, max_freq, min_freq); ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_SCAN | FLAG_SCAN_DONE, SW_RETRY_CNT, SCAN_TIMEOUT, mt6627_get_read_result); FM_UNLOCK(cmd_buf_lock); if (!ret && mt6627_res) { fm_memcpy(pScanTBL, mt6627_res->scan_result, sizeof(fm_u16)*FM_SCANTBL_SIZE); WCN_DBG(FM_NTC | CHIP, "Rx scan result:\n"); for (offset = 0; offset < tmp_scanTBLsize; offset++) { WCN_DBG(FM_NTC | CHIP, "%d: %04x\n", (fm_s32)offset, *(pScanTBL + offset)); } *ScanTBLsize = tmp_scanTBLsize; } else { WCN_DBG(FM_ALT | CHIP, "mt6627_scan failed\n"); return ret; } mt6627_set_bits(FM_MAIN_CTRL, 0x0000, 0xFFF0); //make sure tune/seek/scan/cqi bits = 0 if((temp&0x0020) == 0) { mt6627_Mute(fm_false); } return fm_true; } //add for scan cancel case static fm_bool cqi_abort = fm_false; static fm_s32 mt6627_CQI_Get(fm_s8 *buf, fm_s32 buf_len) { fm_s32 ret = 0; fm_s32 i; fm_u16 pkt_size; struct mt6627_fm_cqi *pmt6627_cqi; struct adapt_fm_cqi *pcqi; if (!buf || buf_len < FM_CQI_BUF_SIZE) { return -FM_EBUF; } if (FM_LOCK(cmd_buf_lock)) return (-FM_ELOCK); pkt_size = mt6627_cqi_get(cmd_buf, TX_BUF_SIZE); if (cqi_abort == fm_true) { cqi_abort = fm_false; ret = -1; } else { ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_SCAN | FLAG_CQI_DONE, SW_RETRY_CNT, SCAN_TIMEOUT, mt6627_get_read_result); } FM_UNLOCK(cmd_buf_lock); if (!ret && mt6627_res) { //FIXEDME pmt6627_cqi = (struct mt6627_fm_cqi*)mt6627_res->cqi; pcqi = (struct adapt_fm_cqi*)buf; for (i = 0; i < (sizeof(mt6627_res->cqi) / sizeof(struct mt6627_fm_cqi)); i++) { pcqi[i].ch = (pmt6627_cqi[i].ch * 10 / 2) + 6400; pcqi[i].rssi = (fm_s32)pmt6627_cqi[i].rssi; if (pcqi[i].rssi >= 32768) { pcqi[i].rssi = pcqi[i].rssi - 65536; } pcqi[i].rssi = ((pcqi[i].rssi * 6) >> 4); WCN_DBG(FM_NTC | CHIP, "%d --> %d(dbm)\n", pcqi[i].ch, pcqi[i].rssi); } } else { WCN_DBG(FM_ALT | CHIP, "mt6627 get CQI failed:%d\n",ret); } mt6627_set_bits(FM_MAIN_CTRL, 0x0000, 0xFFF0); //make sure tune/seek/scan/cqi bits = 0 return ret; } static fm_bool scan_abort = fm_false; #ifdef MTK_FM_50KHZ_SUPPORT #define SCAN_SEG_LEN 250 static fm_s8 raw_buf[16 * sizeof(struct adapt_fm_cqi)] = {0}; static fm_bool mt6627_Scan_50KHz(fm_u16 min_freq, fm_u16 max_freq, fm_u16 *pFreq, fm_u16 *pScanTBL, fm_u16 *ScanTBLsize, fm_u16 scandir, fm_u16 space) { fm_s32 ret = 0; fm_s32 num; fm_s32 seg; fm_s32 i, j; fm_u16 scan_tbl[FM_SCANTBL_SIZE]; //need no less than the chip fm_s32 start_freq, end_freq; fm_s32 ch_offset, step, tmp_val; fm_s32 chl_cnt = 0; fm_s32 word_offset, bit_offset; fm_s32 space_val = 5; struct adapt_fm_cqi *pCQI = (struct adapt_fm_cqi 
*)raw_buf; if (space == 0x0001) { space_val = 5; // 50Khz } else if (space == 0x0002) { space_val = 10; // 100Khz } else if (space == 0x0004) { space_val = 20; // 200Khz } // calculate segment number num = (max_freq - min_freq) / space_val; //Eg, (10800 - 8750) / 5 = 410 seg = (num / SCAN_SEG_LEN) + ((num % SCAN_SEG_LEN) ? 1 : 0); //Eg, (410 / 200) + ((410 % 200) ? 1 : 0) = 2 + 1 = 3 FM_FIFO_RESET(cqi_fifo); fm_memset(pScanTBL, 0, sizeof(fm_u16) * (*ScanTBLsize)); // do scan scan_abort = fm_false; // reset scan cancel flag for (i = 0; i < seg; i++) { start_freq = min_freq + SCAN_SEG_LEN * space_val * i; end_freq = min_freq + SCAN_SEG_LEN * space_val * (i + 1) - space_val; end_freq = (end_freq > max_freq) ? max_freq : end_freq; chl_cnt = 0; if (fm_true == scan_abort) { scan_abort = fm_false; return fm_false; } if (fm_false == mt6627_Scan(start_freq, end_freq, pFreq, scan_tbl, ScanTBLsize, scandir, space)) { return fm_false; } // get channel count for (ch_offset = 0; ch_offset < FM_SCANTBL_SIZE; ch_offset++) { if (scan_tbl[ch_offset] == 0) continue; for (step = 0; step < 16; step++) { if (scan_tbl[ch_offset] & (1 << step)) { tmp_val = start_freq + (ch_offset * 16 + step) * space_val; WCN_DBG(FM_NTC | CHIP, "freq %d, end freq %d\n", tmp_val, end_freq); if (tmp_val <= end_freq) { chl_cnt++; // set reult bitmap word_offset = (tmp_val - min_freq) / space_val / 16; bit_offset = (tmp_val - min_freq) / space_val % 16; if ((word_offset < 26) && (word_offset >= 0)) { pScanTBL[word_offset] |= (1 << bit_offset); } WCN_DBG(FM_NTC | CHIP, "cnt %d, word %d, bit %d\n", chl_cnt, word_offset, bit_offset); } } } } // get cqi info while (chl_cnt > 0) { ret = mt6627_CQI_Get(raw_buf, 16 * sizeof(struct adapt_fm_cqi)); if (ret) { return ret; } // add valid channel to cqi_fifo for (j = 0; j < sizeof(raw_buf) / sizeof(struct adapt_fm_cqi); j++) { if ((pCQI[j].ch >= start_freq) && (pCQI[j].ch <= end_freq)) { FM_FIFO_INPUT(cqi_fifo, pCQI + j); WCN_DBG(FM_NTC | CHIP, "%d %d(dbm) add to fifo\n", pCQI[j].ch, pCQI[j].rssi); } } chl_cnt -= 16; } } return fm_true; } static fm_s32 mt6627_CQI_Get_50KHz(fm_s8 *buf, fm_s32 buf_len) { fm_s32 ret = 0; fm_s32 i; struct adapt_fm_cqi tmp = { .ch = 0, .rssi = 0, }; struct adapt_fm_cqi *pcqi = (struct adapt_fm_cqi *)buf; if (!buf || buf_len < FM_CQI_BUF_SIZE) { return -FM_EBUF; } for (i = 0; ((i < (buf_len / sizeof(struct adapt_fm_cqi))) && \ (fm_false == FM_FIFO_IS_EMPTY(cqi_fifo))); i++) { FM_FIFO_OUTPUT(cqi_fifo, &tmp); pcqi[i].ch = tmp.ch; pcqi[i].rssi = tmp.rssi; WCN_DBG(FM_NTC | CHIP, "%d %d(dbm) get from fifo\n", pcqi[i].ch, pcqi[i].rssi); } return ret; } #endif //MTK_FM_50KHZ_SUPPORT static fm_s32 mt6627_SeekStop(void) { return fm_force_active_event(FLAG_SEEK_DONE); } static fm_s32 mt6627_ScanStop(void) { cqi_abort = fm_true; scan_abort = fm_true; fm_force_active_event(FLAG_SCAN_DONE | FLAG_CQI_DONE); return 0; } #endif /* * mt6627_GetCurRSSI - get current freq's RSSI value * RS=RSSI * If RS>511, then RSSI(dBm)= (RS-1024)/16*6 * else RSSI(dBm)= RS/16*6 */ static fm_s32 mt6627_GetCurRSSI(fm_s32 *pRSSI) { fm_u16 tmp_reg; mt6627_read(FM_RSSI_IND, &tmp_reg); tmp_reg = tmp_reg & 0x03ff; if (pRSSI) { *pRSSI = (tmp_reg > 511) ? 
		(((tmp_reg - 1024) * 6) >> 4) : ((tmp_reg * 6) >> 4);
		WCN_DBG(FM_DBG | CHIP, "rssi:%d, dBm:%d\n", tmp_reg, *pRSSI);
	} else {
		WCN_DBG(FM_ERR | CHIP, "get rssi para error\n");
		return -FM_EPARA;
	}

	return 0;
}

static fm_u16 mt6627_vol_tbl[16] = {
	0x0000, 0x0519, 0x066A, 0x0814,
	0x0A2B, 0x0CCD, 0x101D, 0x1449,
	0x198A, 0x2027, 0x287A, 0x32F5,
	0x4027, 0x50C3, 0x65AD, 0x7FFF
};

static fm_s32 mt6627_SetVol(fm_u8 vol)
{
	fm_s32 ret = 0;

	vol = (vol > 15) ? 15 : vol;
	ret = mt6627_write(0x7D, mt6627_vol_tbl[vol]);
	if (ret) {
		WCN_DBG(FM_ERR | CHIP, "Set vol=%d Failed\n", vol);
		return ret;
	} else {
		WCN_DBG(FM_DBG | CHIP, "Set vol=%d OK\n", vol);
	}

	if (vol == 10) {
		fm_print_cmd_fifo(); //just for debug
		fm_print_evt_fifo();
	}
	return 0;
}

static fm_s32 mt6627_GetVol(fm_u8 *pVol)
{
	int ret = 0;
	fm_u16 tmp;
	fm_s32 i;

	FMR_ASSERT(pVol);

	ret = mt6627_read(0x7D, &tmp);
	if (ret) {
		*pVol = 0;
		WCN_DBG(FM_ERR | CHIP, "Get vol Failed\n");
		return ret;
	}

	for (i = 0; i < 16; i++) {
		if (mt6627_vol_tbl[i] == tmp) {
			*pVol = i;
			break;
		}
	}

	WCN_DBG(FM_DBG | CHIP, "Get vol=%d OK\n", *pVol);
	return 0;
}

static fm_s32 mt6627_dump_reg(void)
{
	fm_s32 i;
	fm_u16 TmpReg;

	for (i = 0; i < 0xff; i++) {
		mt6627_read(i, &TmpReg);
		WCN_DBG(FM_NTC | CHIP, "0x%02x=0x%04x\n", i, TmpReg);
	}
	return 0;
}

/*0:mono, 1:stereo*/
static fm_bool mt6627_GetMonoStereo(fm_u16 *pMonoStereo)
{
#define FM_BF_STEREO 0x1000
	fm_u16 TmpReg;

	if (pMonoStereo) {
		mt6627_read(FM_RSSI_IND, &TmpReg);
		*pMonoStereo = (TmpReg & FM_BF_STEREO) >> 12;
	} else {
		WCN_DBG(FM_ERR | CHIP, "MonoStereo: para err\n");
		return fm_false;
	}

	FM_LOG_NTC(CHIP, "Get MonoStereo:0x%04x\n", *pMonoStereo);
	return fm_true;
}

static fm_s32 mt6627_SetMonoStereo(fm_s32 MonoStereo)
{
	fm_s32 ret = 0;

	FM_LOG_NTC(CHIP, "set to %s\n", MonoStereo ? "mono" : "auto");
	mt6627_top_write(0x50, 0x0007);

	if (MonoStereo) /*mono*/ {
		ret = mt6627_set_bits(0x75, 0x0008, ~0x0008);
	} else /*auto switch*/ {
		ret = mt6627_set_bits(0x75, 0x0000, ~0x0008);
	}

	mt6627_top_write(0x50, 0x000F);
	return ret;
}

static fm_s32 mt6627_GetCapArray(fm_s32 *ca)
{
	fm_u16 dataRead;
	fm_u16 tmp = 0;

	FMR_ASSERT(ca);
	mt6627_read(0x60, &tmp);
	mt6627_write(0x60, tmp & 0xFFF7); //0x60 D3=0

	mt6627_read(0x26, &dataRead);
	*ca = dataRead;
	mt6627_write(0x60, tmp); //0x60 D3=1
	return 0;
}

/*
 * mt6627_GetCurPamd - get current freq's PAMD value
 * PA=PAMD
 * If PA>511 then PAMD(dB)= (PA-1024)/16*6,
 * else PAMD(dB)=PA/16*6
 */
static fm_bool mt6627_GetCurPamd(fm_u16 *pPamdLevl)
{
	fm_u16 tmp_reg;
	fm_u16 dBvalue, valid_cnt = 0;
	int i, total = 0;

	for (i = 0; i < 8; i++) {
		if (mt6627_read(FM_ADDR_PAMD, &tmp_reg)) {
			*pPamdLevl = 0;
			return fm_false;
		}

		tmp_reg &= 0x03FF;
		dBvalue = (tmp_reg > 256) ?
			((512 - tmp_reg) * 6 / 16) : 0;
		if (dBvalue != 0) {
			total += dBvalue;
			valid_cnt++;
			WCN_DBG(FM_DBG | CHIP, "[%d]PAMD=%d\n", i, dBvalue);
		}
		Delayms(3);
	}

	if (valid_cnt != 0) {
		*pPamdLevl = total / valid_cnt;
	} else {
		*pPamdLevl = 0;
	}

	WCN_DBG(FM_NTC | CHIP, "PAMD=%d\n", *pPamdLevl);
	return fm_true;
}

static fm_s32 mt6627_i2s_info_get(fm_s32 *ponoff, fm_s32 *pmode, fm_s32 *psample)
{
	FMR_ASSERT(ponoff);
	FMR_ASSERT(pmode);
	FMR_ASSERT(psample);

	*ponoff = mt6627_fm_config.aud_cfg.i2s_info.status;
	*pmode = mt6627_fm_config.aud_cfg.i2s_info.mode;
	*psample = mt6627_fm_config.aud_cfg.i2s_info.rate;

	return 0;
}

static fm_s32 mt6627fm_get_audio_info(fm_audio_info_t *data)
{
	memcpy(data, &mt6627_fm_config.aud_cfg, sizeof(fm_audio_info_t));
	return 0;
}

static fm_s32 mt6627_hw_info_get(struct fm_hw_info *req)
{
	FMR_ASSERT(req);

	req->chip_id = mt6627_hw_info.chip_id;
	req->eco_ver = mt6627_hw_info.eco_ver;
	req->patch_ver = mt6627_hw_info.patch_ver;
	req->rom_ver = mt6627_hw_info.rom_ver;

	return 0;
}

static fm_s32 mt6627_pre_search(void)
{
	mt6627_RampDown();
	//disable audio output I2S Rx mode
	mt6627_host_write(0x80101054, 0x00000000);
	//disable audio output I2S Tx mode
	mt6627_write(0x9B, 0x0000);
	WCN_DBG(FM_NTC | CHIP, "search threshold: RSSI=%d, de-RSSI=%d, smg=%d\n",
		mt6627_fm_config.rx_cfg.long_ana_rssi_th,
		mt6627_fm_config.rx_cfg.desene_rssi_th,
		mt6627_fm_config.rx_cfg.smg_th);
	return 0;
}

static fm_s32 mt6627_restore_search(void)
{
	mt6627_RampDown();
	//set audio output I2S Tx mode
	mt6627_write(0x9B, 0xF9AB);
	//set audio output I2S Rx mode
	mt6627_host_write(0x80101054, 0x00003f35);
	return 0;
}

static fm_s32 mt6627_soft_mute_tune(fm_u16 freq, fm_s32 *rssi, fm_bool *valid)
{
	fm_s32 ret = 0;
	fm_u16 pkt_size;
	//fm_u16 freq;//, orig_freq;
	struct mt6627_full_cqi *p_cqi;
	fm_s32 RSSI = 0, PAMD = 0, MR = 0, ATDC = 0;
	fm_u32 PRX = 0, ATDEV = 0;
	fm_u16 softmuteGainLvl = 0;

	ret = mt6627_chan_para_get(freq);
	if (ret == 2) {
		ret = mt6627_set_bits(FM_CHANNEL_SET, 0x2000, 0x0FFF);//mdf HiLo
	} else {
		ret = mt6627_set_bits(FM_CHANNEL_SET, 0x0000, 0x0FFF);//clear FA/HL/ATJ
	}

	if (FM_LOCK(cmd_buf_lock))
		return (-FM_ELOCK);
	pkt_size = mt6627_full_cqi_req(cmd_buf, TX_BUF_SIZE, &freq, 1, 1);
	ret = fm_cmd_tx(cmd_buf, pkt_size, FLAG_SM_TUNE, SW_RETRY_CNT, SM_TUNE_TIMEOUT, mt6627_get_read_result);
	FM_UNLOCK(cmd_buf_lock);

	if (!ret && mt6627_res) {
		WCN_DBG(FM_NTC | CHIP, "smt cqi size %d\n", mt6627_res->cqi[0]);
		p_cqi = (struct mt6627_full_cqi *)&mt6627_res->cqi[2];
		// just for debug
		WCN_DBG(FM_NTC | CHIP, "freq %d, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x, 0x%04x\n",
			p_cqi->ch, p_cqi->rssi, p_cqi->pamd, p_cqi->pr, p_cqi->fpamd, p_cqi->mr,
			p_cqi->atdc, p_cqi->prx, p_cqi->atdev, p_cqi->smg, p_cqi->drssi);
		RSSI = ((p_cqi->rssi & 0x03FF) >= 512) ? ((p_cqi->rssi & 0x03FF) - 1024) : (p_cqi->rssi & 0x03FF);
		PAMD = ((p_cqi->pamd & 0x01FF) >= 256) ? ((p_cqi->pamd & 0x01FF) - 512) : (p_cqi->pamd & 0x01FF);
		MR = ((p_cqi->mr & 0x01FF) >= 256) ? ((p_cqi->mr & 0x01FF) - 512) : (p_cqi->mr & 0x01FF);
		ATDC = (p_cqi->atdc >= 32768) ?
(65536 - p_cqi->atdc) : (p_cqi->atdc); if(ATDC < 0) { ATDC = (~(ATDC)) - 1;//Get abs value of ATDC } PRX = (p_cqi->prx & 0x00FF); ATDEV = p_cqi->atdev; softmuteGainLvl = p_cqi->smg; //check if the channel is valid according to each CQIs if((RSSI >= mt6627_fm_config.rx_cfg.long_ana_rssi_th) && (PAMD <= mt6627_fm_config.rx_cfg.pamd_th) && (ATDC <= mt6627_fm_config.rx_cfg.atdc_th) && (MR >= mt6627_fm_config.rx_cfg.mr_th) && (PRX >= mt6627_fm_config.rx_cfg.prx_th) && (ATDEV >= ATDC)//sync scan algorithm && (softmuteGainLvl >= mt6627_fm_config.rx_cfg.smg_th)) { *valid = fm_true; } else { *valid = fm_false; } *rssi = RSSI; /* if(RSSI < -296) WCN_DBG(FM_NTC | CHIP, "rssi\n"); else if(PAMD > -12) WCN_DBG(FM_NTC | CHIP, "PAMD\n"); else if(ATDC > 3496) WCN_DBG(FM_NTC | CHIP, "ATDC\n"); else if(MR < -67) WCN_DBG(FM_NTC | CHIP, "MR\n"); else if(PRX < 80) WCN_DBG(FM_NTC | CHIP, "PRX\n"); else if(ATDEV < ATDC) WCN_DBG(FM_NTC | CHIP, "ATDEV\n"); else if(softmuteGainLvl < 16421) WCN_DBG(FM_NTC | CHIP, "softmuteGainLvl\n"); */ } else { WCN_DBG(FM_ALT | CHIP, "smt get CQI failed\n"); return fm_false; } WCN_DBG(FM_NTC | CHIP, "valid=%d\n",*valid); return fm_true; } static fm_bool mt6627_em_test(fm_u16 group_idx, fm_u16 item_idx, fm_u32 item_value) { return fm_true; } /* parm: parm.th_type: 0, RSSI. 1,desense RSSI. 2,SMG. parm.th_val: threshold value */ static fm_s32 mt6627_set_search_th(fm_s32 idx,fm_s32 val,fm_s32 reserve) { switch (idx) { case 0: { mt6627_fm_config.rx_cfg.long_ana_rssi_th = val; WCN_DBG(FM_NTC | CHIP, "set rssi th =%d\n",val); break; } case 1: { mt6627_fm_config.rx_cfg.desene_rssi_th = val; WCN_DBG(FM_NTC | CHIP, "set desense rssi th =%d\n",val); break; } case 2: { mt6627_fm_config.rx_cfg.smg_th = val; WCN_DBG(FM_NTC | CHIP, "set smg th =%d\n",val); break; } default: break; } return 0; } static fm_s32 MT6627fm_low_power_wa_default(fm_s32 fmon) { return 0; } fm_s32 MT6627fm_low_ops_register(struct fm_lowlevel_ops *ops) { fm_s32 ret = 0; //Basic functions. 
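	/*
	 * Hook every chip-specific routine into the generic low-level ops
	 * table: register access, power sequencing, tune helpers, and the
	 * CQI/search callbacks filled in below. The caller must supply
	 * working cur_freq_get/cur_freq_set callbacks, which are cached in
	 * fm_cb_op for use by the seek/scan paths above.
	 */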
FMR_ASSERT(ops); FMR_ASSERT(ops->cb.cur_freq_get); FMR_ASSERT(ops->cb.cur_freq_set); fm_cb_op = &ops->cb; ops->bi.pwron = mt6627_pwron; ops->bi.pwroff = mt6627_pwroff; ops->bi.msdelay = Delayms; ops->bi.usdelay = Delayus; ops->bi.read = mt6627_read; ops->bi.write = mt6627_write; ops->bi.top_read = mt6627_top_read; ops->bi.top_write = mt6627_top_write; ops->bi.host_read = mt6627_host_read; ops->bi.host_write = mt6627_host_write; ops->bi.setbits = mt6627_set_bits; ops->bi.chipid_get = mt6627_get_chipid; ops->bi.mute = mt6627_Mute; ops->bi.rampdown = mt6627_RampDown; ops->bi.pwrupseq = mt6627_PowerUp; ops->bi.pwrdownseq = mt6627_PowerDown; ops->bi.setfreq = mt6627_SetFreq; ops->bi.low_pwr_wa = MT6627fm_low_power_wa_default; ops->bi.get_aud_info = mt6627fm_get_audio_info; #if 0 ops->bi.seek = mt6627_Seek; ops->bi.seekstop = mt6627_SeekStop; ops->bi.scan = mt6627_Scan; ops->bi.cqi_get = mt6627_CQI_Get; #ifdef MTK_FM_50KHZ_SUPPORT ops->bi.scan = mt6627_Scan_50KHz; ops->bi.cqi_get = mt6627_CQI_Get_50KHz; #endif ops->bi.scanstop = mt6627_ScanStop; ops->bi.i2s_set = mt6627_I2s_Setting; #endif ops->bi.rssiget = mt6627_GetCurRSSI; ops->bi.volset = mt6627_SetVol; ops->bi.volget = mt6627_GetVol; ops->bi.dumpreg = mt6627_dump_reg; ops->bi.msget = mt6627_GetMonoStereo; ops->bi.msset = mt6627_SetMonoStereo; ops->bi.pamdget = mt6627_GetCurPamd; ops->bi.em = mt6627_em_test; ops->bi.anaswitch = mt6627_SetAntennaType; ops->bi.anaget = mt6627_GetAntennaType; ops->bi.caparray_get = mt6627_GetCapArray; ops->bi.hwinfo_get = mt6627_hw_info_get; ops->bi.i2s_get = mt6627_i2s_info_get; ops->bi.is_dese_chan = mt6627_is_dese_chan; ops->bi.softmute_tune = mt6627_soft_mute_tune; ops->bi.desense_check = mt6627_desense_check; ops->bi.cqi_log = mt6627_full_cqi_get; ops->bi.pre_search = mt6627_pre_search; ops->bi.restore_search = mt6627_restore_search; ops->bi.set_search_th = mt6627_set_search_th; cmd_buf_lock = fm_lock_create("27_cmd"); ret = fm_lock_get(cmd_buf_lock); cmd_buf = fm_zalloc(TX_BUF_SIZE + 1); if (!cmd_buf) { WCN_DBG(FM_ALT | CHIP, "6627 fm lib alloc tx buf failed\n"); ret = -1; } #if 0//def MTK_FM_50KHZ_SUPPORT cqi_fifo = fm_fifo_create("6628_cqi_fifo", sizeof(struct adapt_fm_cqi), 640); if (!cqi_fifo) { WCN_DBG(FM_ALT | CHIP, "6627 fm lib create cqi fifo failed\n"); ret = -1; } #endif return ret; } fm_s32 MT6627fm_low_ops_unregister(struct fm_lowlevel_ops *ops) { fm_s32 ret = 0; //Basic functions. 
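	/*
	 * Undo MT6627fm_low_ops_register() in reverse order: drop the
	 * optional CQI FIFO, free the shared command buffer, release its
	 * lock, then wipe the basic-interface table so no stale callbacks
	 * can be reached after unload.
	 */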
FMR_ASSERT(ops); #if 0//def MTK_FM_50KHZ_SUPPORT fm_fifo_release(cqi_fifo); #endif if (cmd_buf) { fm_free(cmd_buf); cmd_buf = NULL; } ret = fm_lock_put(cmd_buf_lock); fm_memset(&ops->bi, 0, sizeof(struct fm_basic_interface)); return ret; } //static struct fm_pub pub; //static struct fm_pub_cb *pub_cb = &pub.pub_tbl; static const fm_u16 mt6627_mcu_dese_list[] = { 7630, 7800, 7940, 8320, 9260, 9600, 9710, 9920, 10400, 10410 }; static const fm_u16 mt6627_gps_dese_list[] = { 7850, 7860 }; static const fm_s8 mt6627_chan_para_map[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, //6500~6595 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //6600~6695 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, //6700~6795 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //6800~6895 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, //6900~6995 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //7000~7095 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, //7100~7195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, //7200~7295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //7300~7395 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //7400~7495 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //7500~7595 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, //7600~7695 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //7700~7795 8, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //7800~7895 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, //7900~7995 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, //8000~8095 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8100~8195 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8200~8295 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8300~8395 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8400~8495 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8500~8595 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8600~8695 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8700~8795 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8800~8895 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //8900~8995 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9000~9095 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9100~9195 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9200~9295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, //9300~9395 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9400~9495 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9500~9595 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9600~9695 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9700~9795 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //9800~9895 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, //9900~9995 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //10000~10095 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //10100~10195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, //10200~10295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //10300~10395 8, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //10400~10495 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, //10500~10595 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, //10600~10695 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, //10700~10795 0 //10800 }; static const fm_u16 mt6627_scan_dese_list[] = { 6910, 7680, 7800, 9210, 9220, 9230, 9600, 9980, 9990, 10400, 10750, 10760 }; static const fm_u16 mt6627_I2S_hopping_list[] = { 6550, 6760, 6960, 6970, 7170, 7370, 7580, 7780, 7980, 7990, 8390, 8400, 8810, 9210, 9220, 10240, 1065 }; static const fm_u16 mt6627_TDD_list[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //6500~6595 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //6600~6695 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //6700~6795 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //6800~6895 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //6900~6995 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7000~7095 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7100~7195 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7200~7295 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7300~7395 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7400~7495 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7500~7595 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7600~7695 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7700~7795 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7800~7895 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //7900~7995 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8000~8095 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8100~8195 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8200~8295 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8300~8395 0x0101, 0x0000, 0x0000, 0x0000, 0x0000, //8400~8495 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8500~8595 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8600~8695 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8700~8795 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8800~8895 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //8900~8995 0x0000, 0x0000, 0x0101, 0x0101, 0x0101, //9000~9095 0x0101, 0x0000, 0x0000, 0x0000, 0x0000, //9100~9195 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //9200~9295 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //9300~9395 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //9400~9495 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //9500~9595 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //9600~9695 0x0000, 0x0000, 0x0000, 0x0000, 0x0100, //9700~9795 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, //9800~9895 0x0101, 0x0101, 0x0001, 0x0000, 0x0000, //9900~9995 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //10000~10095 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //10100~10195 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //10200~10295 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //10300~10395 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //10400~10495 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, //10500~10595 0x0000, 0x0000, 0x0000, 0x0000, 0x0100, //10600~10695 0x0101, 0x0101, 0x0101, 0x0101, 0x0101, //10700~10795 0x0001 //10800 }; static const fm_u16 mt6627_TDD_Mask[] = { 0x0001, 0x0010, 0x0100, 0x1000 }; // return value: 0, not a de-sense channel; 1, this is a de-sense channel; else error no static fm_s32 mt6627_is_dese_chan(fm_u16 freq) { fm_s32 size; //return 0;//HQA only :skip desense channel check. 
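	/*
	 * The de-sense table appears to be kept in 10 kHz units (9600 ==
	 * 96.00 MHz); a frequency passed in the coarser channel spacing is
	 * scaled up by 10 before the linear search below, mirroring the
	 * other table lookups in this file.
	 */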
size = sizeof(mt6627_scan_dese_list) / sizeof(mt6627_scan_dese_list[0]); if (0 == fm_get_channel_space(freq)) { freq *= 10; } while (size) { if (mt6627_scan_dese_list[size -1] == freq) return 1; size--; } return 0; } /* return value: 1, is desense channel and rssi is less than threshold; 0, not desense channel or it is but rssi is more than threshold.*/ static fm_s32 mt6627_desense_check(fm_u16 freq,fm_s32 rssi) { if(mt6627_is_dese_chan(freq)) { if(rssi<mt6627_fm_config.rx_cfg.desene_rssi_th) { return 1; } WCN_DBG(FM_DBG | CHIP, "desen_rssi %d th:%d\n", rssi,mt6627_fm_config.rx_cfg.desene_rssi_th); } return 0; } static fm_bool mt6627_TDD_chan_check(fm_u16 freq) { fm_u32 i = 0; fm_u16 freq_tmp = freq; fm_s32 ret = 0; ret = fm_get_channel_space(freq_tmp); if (0 == ret) { freq_tmp *= 10; } else if(-1 == ret) return fm_false; i = (freq_tmp - 6500)/5; WCN_DBG(FM_NTC | CHIP, "Freq %d is 0x%4x, mask is 0x%4x\n", freq,(mt6627_TDD_list[i/4]),mt6627_TDD_Mask[i%4]); if(mt6627_TDD_list[i/4] & mt6627_TDD_Mask[i%4]) { WCN_DBG(FM_NTC | CHIP, "Freq %d use TDD solution\n", freq); return fm_true; } else return fm_false; } //get channel parameter, HL side/ FA / ATJ static fm_u16 mt6627_chan_para_get(fm_u16 freq) { fm_s32 pos, size; //return 0;//for HQA only: skip FA/HL/ATJ if (0 == fm_get_channel_space(freq)) { freq *= 10; } if(freq < 6500) { return 0; } pos = (freq - 6500) / 5; size = sizeof(mt6627_chan_para_map) / sizeof(mt6627_chan_para_map[0]); pos = (pos < 0) ? 0 : pos; pos = (pos > (size - 1)) ? (size - 1) : pos; return mt6627_chan_para_map[pos]; } static fm_bool mt6627_I2S_hopping_check(fm_u16 freq) { fm_s32 size; size = sizeof(mt6627_I2S_hopping_list) / sizeof(mt6627_I2S_hopping_list[0]); if (0 == fm_get_channel_space(freq)) { freq *= 10; } while (size) { if (mt6627_I2S_hopping_list[size -1] == freq) return 1; size--; } return 0; }
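/*
 * Illustrative sketch, not part of the original driver: several routines
 * above (mt6627_GetCurRSSI() and the RSSI decoding in
 * mt6627_soft_mute_tune()) turn a 10-bit two's-complement register field
 * into a signed value and scale it by 6/16 dB per step. The standalone
 * helper below (fm_raw10_to_dbm is a made-up name, and the sample raw
 * readings are hypothetical) reproduces that conversion so it can be
 * checked in userspace.
 */
#include <stdio.h>

static int fm_raw10_to_dbm(unsigned int raw)
{
	int v = (int)(raw & 0x03ff);	/* keep only the 10-bit field */

	if (v > 511)			/* negative in 10-bit two's complement */
		v -= 1024;
	/* 6/16 dB per LSB; >> of a negative value is arithmetic on gcc,
	 * matching the driver's ((v * 6) >> 4) */
	return (v * 6) >> 4;
}

int main(void)
{
	printf("raw 0x3ff -> %d\n", fm_raw10_to_dbm(0x3ff));	/* -1 count */
	printf("raw 160   -> %d\n", fm_raw10_to_dbm(160));	/* +160 counts */
	printf("raw 900   -> %d\n", fm_raw10_to_dbm(900));	/* -124 counts */
	return 0;
}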
gpl-2.0
holyangel/LGE_G3
drivers/md/dm-service-time.c
8
8444
/* * Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved. * * Module Author: Kiyoshi Ueda * * This file is released under the GPL. * * Throughput oriented path selector. */ #include "dm.h" #include "dm-path-selector.h" #include <linux/slab.h> #include <linux/module.h> #define DM_MSG_PREFIX "multipath service-time" #define ST_MIN_IO 1 #define ST_MAX_RELATIVE_THROUGHPUT 100 #define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7 #define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT) #define ST_VERSION "0.2.0" struct selector { struct list_head valid_paths; struct list_head failed_paths; }; struct path_info { struct list_head list; struct dm_path *path; unsigned repeat_count; unsigned relative_throughput; atomic_t in_flight_size; /* */ }; static struct selector *alloc_selector(void) { struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL); if (s) { INIT_LIST_HEAD(&s->valid_paths); INIT_LIST_HEAD(&s->failed_paths); } return s; } static int st_create(struct path_selector *ps, unsigned argc, char **argv) { struct selector *s = alloc_selector(); if (!s) return -ENOMEM; ps->context = s; return 0; } static void free_paths(struct list_head *paths) { struct path_info *pi, *next; list_for_each_entry_safe(pi, next, paths, list) { list_del(&pi->list); kfree(pi); } } static void st_destroy(struct path_selector *ps) { struct selector *s = ps->context; free_paths(&s->valid_paths); free_paths(&s->failed_paths); kfree(s); ps->context = NULL; } static int st_status(struct path_selector *ps, struct dm_path *path, status_type_t type, char *result, unsigned maxlen) { unsigned sz = 0; struct path_info *pi; if (!path) DMEMIT("0 "); else { pi = path->pscontext; switch (type) { case STATUSTYPE_INFO: DMEMIT("%d %u ", atomic_read(&pi->in_flight_size), pi->relative_throughput); break; case STATUSTYPE_TABLE: DMEMIT("%u %u ", pi->repeat_count, pi->relative_throughput); break; } } return sz; } static int st_add_path(struct path_selector *ps, struct dm_path *path, int argc, char **argv, char **error) { struct selector *s = ps->context; struct path_info *pi; unsigned repeat_count = ST_MIN_IO; unsigned relative_throughput = 1; char dummy; /* */ if (argc > 2) { *error = "service-time ps: incorrect number of arguments"; return -EINVAL; } if (argc && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) { *error = "service-time ps: invalid repeat count"; return -EINVAL; } if ((argc == 2) && (sscanf(argv[1], "%u%c", &relative_throughput, &dummy) != 1 || relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) { *error = "service-time ps: invalid relative_throughput value"; return -EINVAL; } /* */ pi = kmalloc(sizeof(*pi), GFP_KERNEL); if (!pi) { *error = "service-time ps: Error allocating path context"; return -ENOMEM; } pi->path = path; pi->repeat_count = repeat_count; pi->relative_throughput = relative_throughput; atomic_set(&pi->in_flight_size, 0); path->pscontext = pi; list_add_tail(&pi->list, &s->valid_paths); return 0; } static void st_fail_path(struct path_selector *ps, struct dm_path *path) { struct selector *s = ps->context; struct path_info *pi = path->pscontext; list_move(&pi->list, &s->failed_paths); } static int st_reinstate_path(struct path_selector *ps, struct dm_path *path) { struct selector *s = ps->context; struct path_info *pi = path->pscontext; list_move_tail(&pi->list, &s->valid_paths); return 0; } /* */ static int st_compare_load(struct path_info *pi1, struct path_info *pi2, size_t incoming) { size_t sz1, sz2, st1, st2; sz1 = atomic_read(&pi1->in_flight_size); sz2 = 
atomic_read(&pi2->in_flight_size); /* */ if (pi1->relative_throughput == pi2->relative_throughput) return sz1 - sz2; /* */ if (sz1 == sz2 || !pi1->relative_throughput || !pi2->relative_throughput) return pi2->relative_throughput - pi1->relative_throughput; /* */ sz1 += incoming; sz2 += incoming; if (unlikely(sz1 >= ST_MAX_INFLIGHT_SIZE || sz2 >= ST_MAX_INFLIGHT_SIZE)) { /* */ sz1 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT; sz2 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT; } st1 = sz1 * pi2->relative_throughput; st2 = sz2 * pi1->relative_throughput; if (st1 != st2) return st1 - st2; /* */ return pi2->relative_throughput - pi1->relative_throughput; } static struct dm_path *st_select_path(struct path_selector *ps, unsigned *repeat_count, size_t nr_bytes) { struct selector *s = ps->context; struct path_info *pi = NULL, *best = NULL; if (list_empty(&s->valid_paths)) return NULL; /* */ list_move_tail(s->valid_paths.next, &s->valid_paths); list_for_each_entry(pi, &s->valid_paths, list) if (!best || (st_compare_load(pi, best, nr_bytes) < 0)) best = pi; if (!best) return NULL; *repeat_count = best->repeat_count; return best->path; } static int st_start_io(struct path_selector *ps, struct dm_path *path, size_t nr_bytes) { struct path_info *pi = path->pscontext; atomic_add(nr_bytes, &pi->in_flight_size); return 0; } static int st_end_io(struct path_selector *ps, struct dm_path *path, size_t nr_bytes) { struct path_info *pi = path->pscontext; atomic_sub(nr_bytes, &pi->in_flight_size); return 0; } static struct path_selector_type st_ps = { .name = "service-time", .module = THIS_MODULE, .table_args = 2, .info_args = 2, .create = st_create, .destroy = st_destroy, .status = st_status, .add_path = st_add_path, .fail_path = st_fail_path, .reinstate_path = st_reinstate_path, .select_path = st_select_path, .start_io = st_start_io, .end_io = st_end_io, }; static int __init dm_st_init(void) { int r = dm_register_path_selector(&st_ps); if (r < 0) DMERR("register failed %d", r); DMINFO("version " ST_VERSION " loaded"); return r; } static void __exit dm_st_exit(void) { int r = dm_unregister_path_selector(&st_ps); if (r < 0) DMERR("unregister failed %d", r); } module_init(dm_st_init); module_exit(dm_st_exit); MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector"); MODULE_AUTHOR("Kiyoshi Ueda <k-ueda@ct.jp.nec.com>"); MODULE_LICENSE("GPL");
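/*
 * Illustrative sketch, not part of the upstream file: st_compare_load()
 * orders two paths by estimated service time, in_flight_size divided by
 * relative_throughput, without ever dividing. It cross-multiplies the
 * two fractions and pre-shifts the byte counts whenever a product could
 * overflow size_t (the ST_MAX_INFLIGHT_SIZE guard above). The standalone
 * compare_service_time() below reproduces that idea; the names and the
 * sample inputs are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define THROUGHPUT_SHIFT 7	/* mirrors ST_MAX_RELATIVE_THROUGHPUT_SHIFT */
#define INFLIGHT_LIMIT ((size_t)-1 >> THROUGHPUT_SHIFT)

/* < 0: path 1 is the better choice, > 0: path 2, 0: tie. */
static int compare_service_time(size_t sz1, unsigned t1, size_t sz2, unsigned t2)
{
	if (sz1 >= INFLIGHT_LIMIT || sz2 >= INFLIGHT_LIMIT) {
		sz1 >>= THROUGHPUT_SHIFT;	/* keep sz * t inside size_t */
		sz2 >>= THROUGHPUT_SHIFT;
	}
	/* sz1/t1 < sz2/t2  <=>  sz1*t2 < sz2*t1, for t1, t2 > 0 */
	if (sz1 * t2 != sz2 * t1)
		return (sz1 * t2 < sz2 * t1) ? -1 : 1;
	return 0;
}

int main(void)
{
	/* 64 KiB queued on a 100x path beats 1 KiB queued on a 1x path. */
	printf("%d\n", compare_service_time(64 << 10, 100, 1 << 10, 1));
	return 0;
}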
gpl-2.0
huanghjb/codelite
git/gitCloneDlg.cpp
8
1706
////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// // // copyright : (C) 2014 The CodeLite Team // file name : gitCloneDlg.cpp // // ------------------------------------------------------------------------- // A // _____ _ _ _ _ // / __ \ | | | | (_) | // | / \/ ___ __| | ___| | _| |_ ___ // | | / _ \ / _ |/ _ \ | | | __/ _ ) // | \__/\ (_) | (_| | __/ |___| | || __/ // \____/\___/ \__,_|\___\_____/_|\__\___| // // F i l e // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 2 of the License, or // (at your option) any later version. // ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// #include "gitCloneDlg.h" #include "windowattrmanager.h" #include <wx/uri.h> gitCloneDlg::gitCloneDlg(wxWindow* parent) : gitCloneDlgBaseClass(parent) { m_textCtrlURL->SetFocus(); SetName("gitCloneDlg"); WindowAttrManager::Load(this); } gitCloneDlg::~gitCloneDlg() {} void gitCloneDlg::OnOKUI(wxUpdateUIEvent& event) { event.Enable(!m_textCtrlURL->IsEmpty() && !m_dirPickerTargetDir->GetPath().IsEmpty()); } wxString gitCloneDlg::GetCloneURL() const { wxString urlString = m_textCtrlURL->GetValue(); return urlString; }
gpl-2.0
aicjofs/android_kernel_lge_v500_stock
drivers/media/dvb/frontends/ves1820.c
8
11495
/* VES1820 - Single Chip Cable Channel Receiver driver module Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/div64.h> #include "dvb_frontend.h" #include "ves1820.h" struct ves1820_state { struct i2c_adapter* i2c; /* */ const struct ves1820_config* config; struct dvb_frontend frontend; /* */ u8 reg0; u8 pwm; }; static int verbose; static u8 ves1820_inittab[] = { 0x69, 0x6A, 0x93, 0x1A, 0x12, 0x46, 0x26, 0x1A, 0x43, 0x6A, 0xAA, 0xAA, 0x1E, 0x85, 0x43, 0x20, 0xE0, 0x00, 0xA1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40 }; static int ves1820_writereg(struct ves1820_state *state, u8 reg, u8 data) { u8 buf[] = { 0x00, reg, data }; struct i2c_msg msg = {.addr = state->config->demod_address,.flags = 0,.buf = buf,.len = 3 }; int ret; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) printk("ves1820: %s(): writereg error (reg == 0x%02x, " "val == 0x%02x, ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? 
-EREMOTEIO : 0; } static u8 ves1820_readreg(struct ves1820_state *state, u8 reg) { u8 b0[] = { 0x00, reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { {.addr = state->config->demod_address,.flags = 0,.buf = b0,.len = 2}, {.addr = state->config->demod_address,.flags = I2C_M_RD,.buf = b1,.len = 1} }; int ret; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) printk("ves1820: %s(): readreg error (reg == 0x%02x, " "ret == %i)\n", __func__, reg, ret); return b1[0]; } static int ves1820_setup_reg0(struct ves1820_state *state, u8 reg0, fe_spectral_inversion_t inversion) { reg0 |= state->reg0 & 0x62; if (INVERSION_ON == inversion) { if (!state->config->invert) reg0 |= 0x20; else reg0 &= ~0x20; } else if (INVERSION_OFF == inversion) { if (!state->config->invert) reg0 &= ~0x20; else reg0 |= 0x20; } ves1820_writereg(state, 0x00, reg0 & 0xfe); ves1820_writereg(state, 0x00, reg0 | 0x01); state->reg0 = reg0; return 0; } static int ves1820_set_symbolrate(struct ves1820_state *state, u32 symbolrate) { s32 BDR; s32 BDRI; s16 SFIL = 0; u16 NDEC = 0; u32 ratio; u32 fin; u32 tmp; u64 fptmp; u64 fpxin; if (symbolrate > state->config->xin / 2) symbolrate = state->config->xin / 2; if (symbolrate < 500000) symbolrate = 500000; if (symbolrate < state->config->xin / 16) NDEC = 1; if (symbolrate < state->config->xin / 32) NDEC = 2; if (symbolrate < state->config->xin / 64) NDEC = 3; /* */ fpxin = state->config->xin * 10; fptmp = fpxin; do_div(fptmp, 123); if (symbolrate < fptmp) SFIL = 1; fptmp = fpxin; do_div(fptmp, 160); if (symbolrate < fptmp) SFIL = 0; fptmp = fpxin; do_div(fptmp, 246); if (symbolrate < fptmp) SFIL = 1; fptmp = fpxin; do_div(fptmp, 320); if (symbolrate < fptmp) SFIL = 0; fptmp = fpxin; do_div(fptmp, 492); if (symbolrate < fptmp) SFIL = 1; fptmp = fpxin; do_div(fptmp, 640); if (symbolrate < fptmp) SFIL = 0; fptmp = fpxin; do_div(fptmp, 984); if (symbolrate < fptmp) SFIL = 1; fin = state->config->xin >> 4; symbolrate <<= NDEC; ratio = (symbolrate << 4) / fin; tmp = ((symbolrate << 4) % fin) << 8; ratio = (ratio << 8) + tmp / fin; tmp = (tmp % fin) << 8; ratio = (ratio << 8) + DIV_ROUND_CLOSEST(tmp, fin); BDR = ratio; BDRI = (((state->config->xin << 5) / symbolrate) + 1) / 2; if (BDRI > 0xFF) BDRI = 0xFF; SFIL = (SFIL << 4) | ves1820_inittab[0x0E]; NDEC = (NDEC << 6) | ves1820_inittab[0x03]; ves1820_writereg(state, 0x03, NDEC); ves1820_writereg(state, 0x0a, BDR & 0xff); ves1820_writereg(state, 0x0b, (BDR >> 8) & 0xff); ves1820_writereg(state, 0x0c, (BDR >> 16) & 0x3f); ves1820_writereg(state, 0x0d, BDRI); ves1820_writereg(state, 0x0e, SFIL); return 0; } static int ves1820_init(struct dvb_frontend* fe) { struct ves1820_state* state = fe->demodulator_priv; int i; ves1820_writereg(state, 0, 0); for (i = 0; i < sizeof(ves1820_inittab); i++) ves1820_writereg(state, i, ves1820_inittab[i]); if (state->config->selagc) ves1820_writereg(state, 2, ves1820_inittab[2] | 0x08); ves1820_writereg(state, 0x34, state->pwm); return 0; } static int ves1820_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct ves1820_state* state = fe->demodulator_priv; static const u8 reg0x00[] = { 0x00, 0x04, 0x08, 0x0c, 0x10 }; static const u8 reg0x01[] = { 140, 140, 106, 100, 92 }; static const u8 reg0x05[] = { 135, 100, 70, 54, 38 }; static const u8 reg0x08[] = { 162, 116, 67, 52, 35 }; static const u8 reg0x09[] = { 145, 150, 106, 126, 107 }; int real_qam = p->modulation - QAM_16; if (real_qam < 0 || real_qam > 4) return -EINVAL; if (fe->ops.tuner_ops.set_params) { 
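		/*
		 * Let the tuner program the new frequency first, then close
		 * the demodulator's I2C gate again before the register writes
		 * below reconfigure the VES1820 itself.
		 */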
fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } ves1820_set_symbolrate(state, p->symbol_rate); ves1820_writereg(state, 0x34, state->pwm); ves1820_writereg(state, 0x01, reg0x01[real_qam]); ves1820_writereg(state, 0x05, reg0x05[real_qam]); ves1820_writereg(state, 0x08, reg0x08[real_qam]); ves1820_writereg(state, 0x09, reg0x09[real_qam]); ves1820_setup_reg0(state, reg0x00[real_qam], p->inversion); ves1820_writereg(state, 2, ves1820_inittab[2] | (state->config->selagc ? 0x08 : 0)); return 0; } static int ves1820_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct ves1820_state* state = fe->demodulator_priv; int sync; *status = 0; sync = ves1820_readreg(state, 0x11); if (sync & 1) *status |= FE_HAS_SIGNAL; if (sync & 2) *status |= FE_HAS_CARRIER; if (sync & 2) /* */ *status |= FE_HAS_VITERBI; if (sync & 4) *status |= FE_HAS_SYNC; if (sync & 8) *status |= FE_HAS_LOCK; return 0; } static int ves1820_read_ber(struct dvb_frontend* fe, u32* ber) { struct ves1820_state* state = fe->demodulator_priv; u32 _ber = ves1820_readreg(state, 0x14) | (ves1820_readreg(state, 0x15) << 8) | ((ves1820_readreg(state, 0x16) & 0x0f) << 16); *ber = 10 * _ber; return 0; } static int ves1820_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct ves1820_state* state = fe->demodulator_priv; u8 gain = ves1820_readreg(state, 0x17); *strength = (gain << 8) | gain; return 0; } static int ves1820_read_snr(struct dvb_frontend* fe, u16* snr) { struct ves1820_state* state = fe->demodulator_priv; u8 quality = ~ves1820_readreg(state, 0x18); *snr = (quality << 8) | quality; return 0; } static int ves1820_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct ves1820_state* state = fe->demodulator_priv; *ucblocks = ves1820_readreg(state, 0x13) & 0x7f; if (*ucblocks == 0x7f) *ucblocks = 0xffffffff; /* */ ves1820_writereg(state, 0x10, ves1820_inittab[0x10] & 0xdf); ves1820_writereg(state, 0x10, ves1820_inittab[0x10]); return 0; } static int ves1820_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct ves1820_state* state = fe->demodulator_priv; int sync; s8 afc = 0; sync = ves1820_readreg(state, 0x11); afc = ves1820_readreg(state, 0x19); if (verbose) { /* */ printk(sync & 2 ? "ves1820: AFC (%d) %dHz\n" : "ves1820: [AFC (%d) %dHz]\n", afc, -((s32) p->symbol_rate * afc) >> 10); } if (!state->config->invert) { p->inversion = (state->reg0 & 0x20) ? INVERSION_ON : INVERSION_OFF; } else { p->inversion = (!(state->reg0 & 0x20)) ? 
INVERSION_ON : INVERSION_OFF; } p->modulation = ((state->reg0 >> 2) & 7) + QAM_16; p->fec_inner = FEC_NONE; p->frequency = ((p->frequency + 31250) / 62500) * 62500; if (sync & 2) p->frequency -= ((s32) p->symbol_rate * afc) >> 10; return 0; } static int ves1820_sleep(struct dvb_frontend* fe) { struct ves1820_state* state = fe->demodulator_priv; ves1820_writereg(state, 0x1b, 0x02); /* */ ves1820_writereg(state, 0x00, 0x80); /* */ return 0; } static int ves1820_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 200; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void ves1820_release(struct dvb_frontend* fe) { struct ves1820_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops ves1820_ops; struct dvb_frontend* ves1820_attach(const struct ves1820_config* config, struct i2c_adapter* i2c, u8 pwm) { struct ves1820_state* state = NULL; /* */ state = kzalloc(sizeof(struct ves1820_state), GFP_KERNEL); if (state == NULL) goto error; /* */ state->reg0 = ves1820_inittab[0]; state->config = config; state->i2c = i2c; state->pwm = pwm; /* */ if ((ves1820_readreg(state, 0x1a) & 0xf0) != 0x70) goto error; if (verbose) printk("ves1820: pwm=0x%02x\n", state->pwm); /* */ memcpy(&state->frontend.ops, &ves1820_ops, sizeof(struct dvb_frontend_ops)); state->frontend.ops.info.symbol_rate_min = (state->config->xin / 2) / 64; /* */ state->frontend.ops.info.symbol_rate_max = (state->config->xin / 2) / 4; /* */ state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops ves1820_ops = { .delsys = { SYS_DVBC_ANNEX_A }, .info = { .name = "VLSI VES1820 DVB-C", .frequency_stepsize = 62500, .frequency_min = 47000000, .frequency_max = 862000000, .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO }, .release = ves1820_release, .init = ves1820_init, .sleep = ves1820_sleep, .set_frontend = ves1820_set_parameters, .get_frontend = ves1820_get_frontend, .get_tune_settings = ves1820_get_tune_settings, .read_status = ves1820_read_status, .read_ber = ves1820_read_ber, .read_signal_strength = ves1820_read_signal_strength, .read_snr = ves1820_read_snr, .read_ucblocks = ves1820_read_ucblocks, }; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "print AFC offset after tuning for debugging the PWM setting"); MODULE_DESCRIPTION("VLSI VES1820 DVB-C Demodulator driver"); MODULE_AUTHOR("Ralph Metzler, Holger Waechtler"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ves1820_attach);
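/*
 * Illustrative sketch, not part of the upstream file:
 * ves1820_set_symbolrate() builds the BDR ratio, effectively
 * round(symbolrate * 2^20 / fin) with fin = xin/16, by long division in
 * 8-bit stages so the intermediate values stay inside 32 bits. The
 * helpers below (bdr_staged/bdr_reference are made-up names; the NDEC
 * decimation shift is omitted) check the staged form against a 64-bit
 * reference. The sample symbol rate is hypothetical; 57.84 MHz is a
 * commonly used VES1820 XIN.
 */
#include <stdint.h>
#include <stdio.h>

/* Staged 8-bits-at-a-time long division, as the driver does it. */
static uint32_t bdr_staged(uint32_t symbolrate, uint32_t fin)
{
	uint32_t ratio, tmp;

	ratio = (symbolrate << 4) / fin;
	tmp = ((symbolrate << 4) % fin) << 8;
	ratio = (ratio << 8) + tmp / fin;
	tmp = (tmp % fin) << 8;
	ratio = (ratio << 8) + (tmp + fin / 2) / fin;	/* round to nearest */
	return ratio;
}

/* 64-bit reference: round(symbolrate * 2^20 / fin). */
static uint32_t bdr_reference(uint32_t symbolrate, uint32_t fin)
{
	return (uint32_t)((((uint64_t)symbolrate << 20) + fin / 2) / fin);
}

int main(void)
{
	uint32_t xin = 57840000, fin = xin >> 4;
	uint32_t sr = 6900000;	/* 6.9 MS/s, hypothetical */

	printf("staged=%u reference=%u\n", bdr_staged(sr, fin), bdr_reference(sr, fin));
	return 0;
}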
gpl-2.0
gerboland/linux-2.6.15-neuros-eabi
drivers/char/rio/rioinit.c
8
42923
/* ** ----------------------------------------------------------------------------- ** ** Perle Specialix driver for Linux ** Ported from existing RIO Driver for SCO sources. * * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** ** Module : rioinit.c ** SID : 1.3 ** Last Modified : 11/6/98 10:33:43 ** Retrieved : 11/6/98 10:33:49 ** ** ident @(#)rioinit.c 1.3 ** ** ----------------------------------------------------------------------------- */ #ifdef SCCS_LABELS static char *_rioinit_c_sccs_ = "@(#)rioinit.c 1.3"; #endif #include <linux/config.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/system.h> #include <asm/string.h> #include <asm/semaphore.h> #include <asm/uaccess.h> #include <linux/termios.h> #include <linux/serial.h> #include <linux/generic_serial.h> #include "linux_compat.h" #include "typdef.h" #include "pkt.h" #include "daemon.h" #include "rio.h" #include "riospace.h" #include "top.h" #include "cmdpkt.h" #include "map.h" #include "riotypes.h" #include "rup.h" #include "port.h" #include "riodrvr.h" #include "rioinfo.h" #include "func.h" #include "errors.h" #include "pci.h" #include "parmmap.h" #include "unixrup.h" #include "board.h" #include "host.h" #include "error.h" #include "phb.h" #include "link.h" #include "cmdblk.h" #include "route.h" #include "control.h" #include "cirrus.h" #include "rioioctl.h" #include "rio_linux.h" #undef bcopy #define bcopy rio_pcicopy int RIOPCIinit(struct rio_info *p, int Mode); #if 0 static void RIOAllocateInterrupts(struct rio_info *); static int RIOReport(struct rio_info *); static void RIOStopInterrupts(struct rio_info *, int, int); #endif static int RIOScrub(int, BYTE *, int); #if 0 extern int rio_intr(); /* ** Init time code. */ void rioinit( p, info ) struct rio_info * p; struct RioHostInfo * info; { /* ** Multi-Host card support - taking the easy way out - sorry ! ** We allocate and set up the Host and Port structs when the ** driver is called to 'install' the first host. ** We check for this first 'call' by testing the RIOPortp pointer. */ if ( !p->RIOPortp ) { rio_dprintk (RIO_DEBUG_INIT, "Allocating and setting up driver data structures\n"); RIOAllocDataStructs(p); /* allocate host/port structs */ RIOSetupDataStructs(p); /* setup topology structs */ } RIOInitHosts( p, info ); /* hunt down the hardware */ RIOAllocateInterrupts(p); /* allocate interrupts */ RIOReport(p); /* show what we found */ } /* ** Initialise the Cards */ void RIOInitHosts(p, info) struct rio_info * p; struct RioHostInfo * info; { /* ** 15.10.1998 ARG - ESIL 0762 part fix ** If there is no ISA card definition - we always look for PCI cards. ** As we currently only support one host card this lets an ISA card ** definition take precedence over PLUG and PLAY. 
** No ISA card - we are PLUG and PLAY with PCI. */ /* ** Note - for PCI both these will be zero, that's okay because ** RIOPCIInit() fills them in if a card is found. */ p->RIOHosts[p->RIONumHosts].Ivec = info->vector; p->RIOHosts[p->RIONumHosts].PaddrP = info->location; /* ** Check that we are able to accommodate another host */ if ( p->RIONumHosts >= RIO_HOSTS ) { p->RIOFailed++; return; } if ( info->bus & ISA_BUS ) { rio_dprintk (RIO_DEBUG_INIT, "initialising card %d (ISA)\n", p->RIONumHosts); RIOISAinit(p, p->mode); } else { rio_dprintk (RIO_DEBUG_INIT, "initialising card %d (PCI)\n", p->RIONumHosts); RIOPCIinit(p, RIO_PCI_DEFAULT_MODE); } rio_dprintk (RIO_DEBUG_INIT, "Total hosts initialised so far : %d\n", p->RIONumHosts); #ifdef FUTURE_RELEASE if (p->bus & EISA_BUS) /* EISA card */ RIOEISAinit(p, RIO_EISA_DEFAULT_MODE); if (p->bus & MCA_BUS) /* MCA card */ RIOMCAinit(p, RIO_MCA_DEFAULT_MODE); #endif } /* ** go through memory for an AT host that we pass in the device info ** structure and initialise */ void RIOISAinit(p, mode) struct rio_info * p; int mode; { /* XXX Need to implement this. */ #if 0 p->intr_tid = iointset(p->RIOHosts[p->RIONumHosts].Ivec, (int (*)())rio_intr, (char*)p->RIONumHosts); rio_dprintk (RIO_DEBUG_INIT, "Set interrupt handler, intr_tid = 0x%x\n", p->intr_tid ); if (RIODoAT(p, p->RIOHosts[p->RIONumHosts].PaddrP, mode)) { return; } else { rio_dprintk (RIO_DEBUG_INIT, "RIODoAT failed\n"); p->RIOFailed++; } #endif } /* ** RIODoAT : ** ** Map in a boards physical address, check that the board is there, ** test the board and if everything is okay assign the board an entry ** in the Rio Hosts structure. */ int RIODoAT(p, Base, mode) struct rio_info * p; int Base; int mode; { #define FOUND 1 #define NOT_FOUND 0 caddr_t cardAddr; /* ** Check to see if we actually have a board at this physical address. */ if ((cardAddr = RIOCheckForATCard(Base)) != 0) { /* ** Now test the board to see if it is working. */ if (RIOBoardTest(Base, cardAddr, RIO_AT, 0) == RIO_SUCCESS) { /* ** Fill out a slot in the Rio host structure. */ if (RIOAssignAT(p, Base, cardAddr, mode)) { return(FOUND); } } RIOMapout(Base, RIO_AT_MEM_SIZE, cardAddr); } return(NOT_FOUND); } caddr_t RIOCheckForATCard(Base) int Base; { int off; struct DpRam *cardp; /* (Points at the host) */ caddr_t virtAddr; unsigned char RIOSigTab[24]; /* ** Table of values to search for as prom signature of a host card */ strcpy(RIOSigTab, "JBJGPGGHINSMJPJR"); /* ** Hey! Yes, You reading this code! Yo, grab a load a this: ** ** IF the card is using WORD MODE rather than BYTE MODE ** then it will occupy 128K of PHYSICAL memory area. So, ** you might think that the following Mapin is wrong. Well, ** it isn't, because the SECOND 64K of occupied space is an ** EXACT COPY of the FIRST 64K. (good?), so, we need only ** map it in in one 64K block. */ if (RIOMapin(Base, RIO_AT_MEM_SIZE, &virtAddr) == -1) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Couldn't map the board in!\n"); return((caddr_t)0); } /* ** virtAddr points to the DP ram of the system. ** We now cast this to a pointer to a RIO Host, ** and have a rummage about in the PROM. 
*/ cardp = (struct DpRam *)virtAddr; for (off=0; RIOSigTab[off]; off++) { if ((RBYTE(cardp->DpSignature[off]) & 0xFF) != RIOSigTab[off]) { /* ** Signature mismatch - card not at this address */ RIOMapout(Base, RIO_AT_MEM_SIZE, virtAddr); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Couldn't match the signature 0x%x 0x%x!\n", (int)cardp, off); return((caddr_t)0); } } /* ** If we get here then we must have found a valid board so return ** its virtual address. */ return(virtAddr); } #endif /** ** RIOAssignAT : ** ** Fill out the fields in the p->RIOHosts structure now we know we know ** we have a board present. ** ** bits < 0 indicates 8 bit operation requested, ** bits > 0 indicates 16 bit operation. */ int RIOAssignAT(p, Base, virtAddr, mode) struct rio_info * p; int Base; caddr_t virtAddr; int mode; { int bits; struct DpRam *cardp = (struct DpRam *)virtAddr; if ((Base < ONE_MEG) || (mode & BYTE_ACCESS_MODE)) bits = BYTE_OPERATION; else bits = WORD_OPERATION; /* ** Board has passed its scrub test. Fill in all the ** transient stuff. */ p->RIOHosts[p->RIONumHosts].Caddr = virtAddr; p->RIOHosts[p->RIONumHosts].CardP = (struct DpRam *)virtAddr; /* ** Revision 01 AT host cards don't support WORD operations, */ if ( RBYTE(cardp->DpRevision) == 01 ) bits = BYTE_OPERATION; p->RIOHosts[p->RIONumHosts].Type = RIO_AT; p->RIOHosts[p->RIONumHosts].Copy = bcopy; /* set this later */ p->RIOHosts[p->RIONumHosts].Slot = -1; p->RIOHosts[p->RIONumHosts].Mode = SLOW_LINKS | SLOW_AT_BUS | bits; WBYTE(p->RIOHosts[p->RIONumHosts].Control, BOOT_FROM_RAM | EXTERNAL_BUS_OFF | p->RIOHosts[p->RIONumHosts].Mode | INTERRUPT_DISABLE ); WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt,0xff); WBYTE(p->RIOHosts[p->RIONumHosts].Control, BOOT_FROM_RAM | EXTERNAL_BUS_OFF | p->RIOHosts[p->RIONumHosts].Mode | INTERRUPT_DISABLE ); WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt,0xff); p->RIOHosts[p->RIONumHosts].UniqueNum = ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[0])&0xFF)<<0)| ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[1])&0xFF)<<8)| ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[2])&0xFF)<<16)| ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[3])&0xFF)<<24); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Uniquenum 0x%x\n",p->RIOHosts[p->RIONumHosts].UniqueNum); p->RIONumHosts++; rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Tests Passed at 0x%x\n", Base); return(1); } #if 0 #ifdef FUTURE_RELEASE int RIOMCAinit(int Mode) { uchar SlotNumber; caddr_t Caddr; uint Paddr; uint Ivec; int Handle; int ret = 0; /* ** Valid mode information for MCA cards ** is only FAST LINKS */ Mode = (Mode & FAST_LINKS) ? McaTpFastLinks : McaTpSlowLinks; rio_dprintk (RIO_DEBUG_INIT, "RIOMCAinit(%d)\n",Mode); /* ** Check out each of the slots */ for (SlotNumber = 0; SlotNumber < McaMaxSlots; SlotNumber++) { /* ** Enable the slot we want to talk to */ outb( McaSlotSelect, SlotNumber | McaSlotEnable ); /* ** Read the ID word from the slot */ if (((inb(McaIdHigh)<< 8)|inb(McaIdLow)) == McaRIOId) { rio_dprintk (RIO_DEBUG_INIT, "Potential MCA card in slot %d\n", SlotNumber); /* ** Card appears to be a RIO MCA card! */ RIOMachineType |= (1<<RIO_MCA); /* ** Just check we haven't found too many wonderful objects */ if ( RIONumHosts >= RIO_HOSTS ) { Rprintf(RIOMesgTooManyCards); return(ret); } /* ** McaIrqEnable contains the interrupt vector, and a card ** enable bit. 
*/ Ivec = inb(McaIrqEnable); rio_dprintk (RIO_DEBUG_INIT, "Ivec is %x\n", Ivec); switch ( Ivec & McaIrqMask ) { case McaIrq9: rio_dprintk (RIO_DEBUG_INIT, "IRQ9\n"); break; case McaIrq3: rio_dprintk (RIO_DEBUG_INIT, "IRQ3\n"); break; case McaIrq4: rio_dprintk (RIO_DEBUG_INIT, "IRQ4\n"); break; case McaIrq7: rio_dprintk (RIO_DEBUG_INIT, "IRQ7\n"); break; case McaIrq10: rio_dprintk (RIO_DEBUG_INIT, "IRQ10\n"); break; case McaIrq11: rio_dprintk (RIO_DEBUG_INIT, "IRQ11\n"); break; case McaIrq12: rio_dprintk (RIO_DEBUG_INIT, "IRQ12\n"); break; case McaIrq15: rio_dprintk (RIO_DEBUG_INIT, "IRQ15\n"); break; } /* ** If the card enable bit isn't set, then set it! */ if ((Ivec & McaCardEnable) != McaCardEnable) { rio_dprintk (RIO_DEBUG_INIT, "McaCardEnable not set - setting!\n"); outb(McaIrqEnable,Ivec|McaCardEnable); } else rio_dprintk (RIO_DEBUG_INIT, "McaCardEnable already set\n"); /* ** Convert the IRQ enable mask into something useful */ Ivec = RIOMcaToIvec[Ivec & McaIrqMask]; /* ** Find the physical address */ rio_dprintk (RIO_DEBUG_INIT, "inb(McaMemory) is %x\n", inb(McaMemory)); Paddr = McaAddress(inb(McaMemory)); rio_dprintk (RIO_DEBUG_INIT, "MCA card has Ivec %d Addr %x\n", Ivec, Paddr); if ( Paddr != 0 ) { /* ** Tell the memory mapper that we want to talk to it */ Handle = RIOMapin( Paddr, RIO_MCA_MEM_SIZE, &Caddr ); if ( Handle == -1 ) { rio_dprintk (RIO_DEBUG_INIT, "Couldn't map %d bytes at %x\n", RIO_MCA_MEM_SIZE, Paddr; continue; } rio_dprintk (RIO_DEBUG_INIT, "Board mapped to vaddr 0x%x\n", Caddr); /* ** And check that it is actually there! */ if ( RIOBoardTest( Paddr,Caddr,RIO_MCA,SlotNumber ) == RIO_SUCCESS ) { rio_dprintk (RIO_DEBUG_INIT, "Board has passed test\n"); rio_dprintk (RIO_DEBUG_INIT, "Slot %d. Type %d. Paddr 0x%x. Caddr 0x%x. Mode 0x%x.\n", SlotNumber, RIO_MCA, Paddr, Caddr, Mode); /* ** Board has passed its scrub test. Fill in all the ** transient stuff. */ p->RIOHosts[RIONumHosts].Slot = SlotNumber; p->RIOHosts[RIONumHosts].Ivec = Ivec; p->RIOHosts[RIONumHosts].Type = RIO_MCA; p->RIOHosts[RIONumHosts].Copy = bcopy; p->RIOHosts[RIONumHosts].PaddrP = Paddr; p->RIOHosts[RIONumHosts].Caddr = Caddr; p->RIOHosts[RIONumHosts].CardP = (struct DpRam *)Caddr; p->RIOHosts[RIONumHosts].Mode = Mode; WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt , 0xff); p->RIOHosts[RIONumHosts].UniqueNum = ((RBYTE(p->RIOHosts[RIONumHosts].Unique[0])&0xFF)<<0)| ((RBYTE(p->RIOHosts[RIONumHosts].Unique[1])&0xFF)<<8)| ((RBYTE(p->RIOHosts[RIONumHosts].Unique[2])&0xFF)<<16)| ((RBYTE(p->RIOHosts[RIONumHosts].Unique[3])&0xFF)<<24); RIONumHosts++; ret++; } else { /* ** It failed the test, so ignore it. */ rio_dprintk (RIO_DEBUG_INIT, "TEST FAILED\n"); RIOMapout(Paddr, RIO_MCA_MEM_SIZE, Caddr ); } } else { rio_dprintk (RIO_DEBUG_INIT, "Slot %d - Paddr zero!\n", SlotNumber); } } else { rio_dprintk (RIO_DEBUG_INIT, "Slot %d NOT RIO\n", SlotNumber); } } /* ** Now we have checked all the slots, turn off the MCA slot selector */ outb(McaSlotSelect,0); rio_dprintk (RIO_DEBUG_INIT, "Slot %d NOT RIO\n", SlotNumber); return ret; } int RIOEISAinit( int Mode ) { static int EISADone = 0; uint Paddr; int PollIntMixMsgDone = 0; caddr_t Caddr; ushort Ident; uchar EisaSlot; uchar Ivec; int ret = 0; /* ** The only valid mode information for EISA hosts is fast or slow ** links. */ Mode = (Mode & FAST_LINKS) ? 
EISA_TP_FAST_LINKS : EISA_TP_SLOW_LINKS; if ( EISADone ) { rio_dprintk (RIO_DEBUG_INIT, "RIOEISAinit() - already done, return.\n"); return(0); } EISADone++; rio_dprintk (RIO_DEBUG_INIT, "RIOEISAinit()\n"); /* ** First check all cards to see if ANY are set for polled mode operation. ** If so, set ALL to polled. */ for ( EisaSlot=1; EisaSlot<=RIO_MAX_EISA_SLOTS; EisaSlot++ ) { Ident = (INBZ(EisaSlot,EISA_PRODUCT_IDENT_HI)<<8) | INBZ(EisaSlot,EISA_PRODUCT_IDENT_LO); if ( Ident == RIO_EISA_IDENT ) { rio_dprintk (RIO_DEBUG_INIT, "Found Specialix product\n"); if ( INBZ(EisaSlot,EISA_PRODUCT_NUMBER) != RIO_EISA_PRODUCT_CODE ) { rio_dprintk (RIO_DEBUG_INIT, "Not Specialix RIO - Product number %x\n", INBZ(EisaSlot, EISA_PRODUCT_NUMBER)); continue; /* next slot */ } /* ** Its a Specialix RIO! */ rio_dprintk (RIO_DEBUG_INIT, "RIO Revision %d\n", INBZ(EisaSlot, EISA_REVISION_NUMBER)); RIOMachineType |= (1<<RIO_EISA); /* ** Just check we haven't found too many wonderful objects */ if ( RIONumHosts >= RIO_HOSTS ) { Rprintf(RIOMesgTooManyCards); return 0; } /* ** Ensure that the enable bit is set! */ OUTBZ( EisaSlot, EISA_ENABLE, RIO_EISA_ENABLE_BIT ); /* ** EISA_INTERRUPT_VEC contains the interrupt vector. */ Ivec = INBZ(EisaSlot,EISA_INTERRUPT_VEC); #ifdef RIODEBUG switch ( Ivec & EISA_INTERRUPT_MASK ) { case EISA_IRQ_3: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 3\n"); break; case EISA_IRQ_4: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 4\n"); break; case EISA_IRQ_5: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 5\n"); break; case EISA_IRQ_6: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 6\n"); break; case EISA_IRQ_7: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 7\n"); break; case EISA_IRQ_9: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 9\n"); break; case EISA_IRQ_10: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 10\n"); break; case EISA_IRQ_11: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 11\n"); break; case EISA_IRQ_12: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 12\n"); break; case EISA_IRQ_14: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 14\n"); break; case EISA_IRQ_15: rio_dprintk (RIO_DEBUG_INIT, "EISA IRQ 15\n"); break; case EISA_POLLED: rio_dprintk (RIO_DEBUG_INIT, "EISA POLLED\n"); break; default: rio_dprintk (RIO_DEBUG_INIT, NULL,DBG_INIT|DBG_FAIL,"Shagged interrupt number!\n"); Ivec &= EISA_CONTROL_MASK; } #endif if ( (Ivec & EISA_INTERRUPT_MASK) == EISA_POLLED ) { RIOWillPoll = 1; break; /* From EisaSlot loop */ } } } /* ** Do it all again now we know whether to change all cards to polled ** mode or not */ for ( EisaSlot=1; EisaSlot<=RIO_MAX_EISA_SLOTS; EisaSlot++ ) { Ident = (INBZ(EisaSlot,EISA_PRODUCT_IDENT_HI)<<8) | INBZ(EisaSlot,EISA_PRODUCT_IDENT_LO); if ( Ident == RIO_EISA_IDENT ) { if ( INBZ(EisaSlot,EISA_PRODUCT_NUMBER) != RIO_EISA_PRODUCT_CODE ) continue; /* next slot */ /* ** Its a Specialix RIO! */ /* ** Ensure that the enable bit is set! */ OUTBZ( EisaSlot, EISA_ENABLE, RIO_EISA_ENABLE_BIT ); /* ** EISA_INTERRUPT_VEC contains the interrupt vector. */ Ivec = INBZ(EisaSlot,EISA_INTERRUPT_VEC); if ( RIOWillPoll ) { /* ** If we are going to operate in polled mode, but this ** board is configured to be interrupt driven, display ** the message explaining the situation to the punter, ** assuming we haven't already done so. */ if ( !PollIntMixMsgDone && (Ivec & EISA_INTERRUPT_MASK) != EISA_POLLED ) { Rprintf(RIOMesgAllPolled); PollIntMixMsgDone = 1; } /* ** Ungraciously ignore whatever the board reports as its ** interrupt vector... */ Ivec &= ~EISA_INTERRUPT_MASK; /* ** ...and force it to dance to the poll tune. 
*/ Ivec |= EISA_POLLED; } /* ** Convert the IRQ enable mask into something useful (0-15) */ Ivec = RIOEisaToIvec(Ivec); rio_dprintk (RIO_DEBUG_INIT, "EISA host in slot %d has Ivec 0x%x\n", EisaSlot, Ivec); /* ** Find the physical address */ Paddr = (INBZ(EisaSlot,EISA_MEMORY_BASE_HI)<<24) | (INBZ(EisaSlot,EISA_MEMORY_BASE_LO)<<16); rio_dprintk (RIO_DEBUG_INIT, "EISA card has Ivec %d Addr %x\n", Ivec, Paddr); if ( Paddr == 0 ) { rio_dprintk (RIO_DEBUG_INIT, "Board in slot %d configured for address zero!\n", EisaSlot); continue; } /* ** Tell the memory mapper that we want to talk to it */ rio_dprintk (RIO_DEBUG_INIT, "About to map EISA card\n"); if (RIOMapin( Paddr, RIO_EISA_MEM_SIZE, &Caddr) == -1) { rio_dprintk (RIO_DEBUG_INIT, "Couldn't map %d bytes at %x\n", RIO_EISA_MEM_SIZE,Paddr); continue; } rio_dprintk (RIO_DEBUG_INIT, "Board mapped to vaddr 0x%x\n", Caddr); /* ** And check that it is actually there! */ if ( RIOBoardTest( Paddr,Caddr,RIO_EISA,EisaSlot) == RIO_SUCCESS ) { rio_dprintk (RIO_DEBUG_INIT, "Board has passed test\n"); rio_dprintk (RIO_DEBUG_INIT, "Slot %d. Ivec %d. Type %d. Paddr 0x%x. Caddr 0x%x. Mode 0x%x.\n", EisaSlot,Ivec,RIO_EISA,Paddr,Caddr,Mode); /* ** Board has passed its scrub test. Fill in all the ** transient stuff. */ p->RIOHosts[RIONumHosts].Slot = EisaSlot; p->RIOHosts[RIONumHosts].Ivec = Ivec; p->RIOHosts[RIONumHosts].Type = RIO_EISA; p->RIOHosts[RIONumHosts].Copy = bcopy; p->RIOHosts[RIONumHosts].PaddrP = Paddr; p->RIOHosts[RIONumHosts].Caddr = Caddr; p->RIOHosts[RIONumHosts].CardP = (struct DpRam *)Caddr; p->RIOHosts[RIONumHosts].Mode = Mode; /* ** Because the EISA prom is mapped into IO space, we ** need to copy the unique number into the memory area ** that it would have occupied, so that the download ** code can determine its ID and card type. */ WBYTE(p->RIOHosts[RIONumHosts].Unique[0],INBZ(EisaSlot,EISA_UNIQUE_NUM_0)); WBYTE(p->RIOHosts[RIONumHosts].Unique[1],INBZ(EisaSlot,EISA_UNIQUE_NUM_1)); WBYTE(p->RIOHosts[RIONumHosts].Unique[2],INBZ(EisaSlot,EISA_UNIQUE_NUM_2)); WBYTE(p->RIOHosts[RIONumHosts].Unique[3],INBZ(EisaSlot,EISA_UNIQUE_NUM_3)); p->RIOHosts[RIONumHosts].UniqueNum = ((RBYTE(p->RIOHosts[RIONumHosts].Unique[0])&0xFF)<<0)| ((RBYTE(p->RIOHosts[RIONumHosts].Unique[1])&0xFF)<<8)| ((RBYTE(p->RIOHosts[RIONumHosts].Unique[2])&0xFF)<<16)| ((RBYTE(p->RIOHosts[RIONumHosts].Unique[3])&0xFF)<<24); INBZ(EisaSlot,EISA_INTERRUPT_RESET); RIONumHosts++; ret++; } else { /* ** It failed the test, so ignore it.
*/ rio_dprintk (RIO_DEBUG_INIT, "TEST FAILED\n"); RIOMapout(Paddr, RIO_EISA_MEM_SIZE, Caddr ); } } } if (RIOMachineType & RIO_EISA) return ret+1; return ret; } #endif #ifndef linux #define CONFIG_ADDRESS 0xcf8 #define CONFIG_DATA 0xcfc #define FORWARD_REG 0xcfa static int read_config(int bus_number, int device_num, int r_number) { unsigned int cav; unsigned int val; /* Build config_address_value: 31 24 23 16 15 11 10 8 7 0 ------------------------------------------------------ |1| 0000000 | bus_number | device # | 000 | register | ------------------------------------------------------ */ cav = r_number & 0xff; cav |= ((device_num & 0x1f) << 11); cav |= ((bus_number & 0xff) << 16); cav |= 0x80000000; /* Enable bit */ outpd(CONFIG_ADDRESS,cav); val = inpd(CONFIG_DATA); outpd(CONFIG_ADDRESS,0); return val; } static int write_config(int bus_number, int device_num, int r_number, unsigned int val) { unsigned int cav; /* Build config_address_value: 31 24 23 16 15 11 10 8 7 0 ------------------------------------------------------ |1| 0000000 | bus_number | device # | 000 | register | ------------------------------------------------------ */ cav = r_number & 0xff; cav |= ((device_num & 0x1f) << 11); cav |= ((bus_number & 0xff) << 16); cav |= 0x80000000; /* Enable bit */ outpd(CONFIG_ADDRESS, cav); outpd(CONFIG_DATA, val); outpd(CONFIG_ADDRESS, 0); return val; } #else /* XXX Implement these... */ static int read_config(int bus_number, int device_num, int r_number) { return 0; } static int write_config(int bus_number, int device_num, int r_number, unsigned int val) { return 0; } #endif int RIOPCIinit(p, Mode) struct rio_info *p; int Mode; { #define MAX_PCI_SLOT 32 #define RIO_PCI_JET_CARD 0x200011CB static int slot; /* count of machine's PCI slots searched so far */ caddr_t Caddr; /* Virtual address of the current PCI host card. */ unsigned char Ivec; /* interrupt vector for the current PCI host */ unsigned long Paddr; /* Physical address for the current PCI host */ int Handle; /* Handle to Virtual memory allocated for current PCI host */ rio_dprintk (RIO_DEBUG_INIT, "Search for a RIO PCI card - start at slot %d\n", slot); /* ** Initialise the search status */ p->RIOLastPCISearch = RIO_FAIL; while ( (slot < MAX_PCI_SLOT) && (p->RIOLastPCISearch != RIO_SUCCESS) ) { rio_dprintk (RIO_DEBUG_INIT, "Currently testing slot %d\n", slot); if (read_config(0,slot,0) == RIO_PCI_JET_CARD) { p->RIOHosts[p->RIONumHosts].Ivec = 0; Paddr = read_config(0,slot,0x18); Paddr = Paddr - (Paddr & 0x1); /* Mask off the io bit */ if ( (Paddr == 0) || ((Paddr & 0xffff0000) == 0xffff0000) ) { rio_dprintk (RIO_DEBUG_INIT, "Goofed up slot\n"); /* what! */ slot++; continue; } p->RIOHosts[p->RIONumHosts].PaddrP = Paddr; Ivec = (read_config(0,slot,0x3c) & 0xff); rio_dprintk (RIO_DEBUG_INIT, "PCI Host at 0x%x, Intr %d\n", (int)Paddr, Ivec); Handle = RIOMapin( Paddr, RIO_PCI_MEM_SIZE, &Caddr ); if (Handle == -1) { rio_dprintk (RIO_DEBUG_INIT, "Couldn't map %d bytes at 0x%x\n", RIO_PCI_MEM_SIZE, (int)Paddr); slot++; continue; } p->RIOHosts[p->RIONumHosts].Ivec = Ivec + 32; p->intr_tid = iointset(p->RIOHosts[p->RIONumHosts].Ivec, (int (*)())rio_intr, (char *)p->RIONumHosts); if (RIOBoardTest( Paddr, Caddr, RIO_PCI, 0 ) == RIO_SUCCESS) { rio_dprintk (RIO_DEBUG_INIT, "Board has passed test\n"); rio_dprintk (RIO_DEBUG_INIT, "Paddr 0x%x. Caddr 0x%x. Mode 0x%x.\n", Paddr, Caddr, Mode); /* ** Board has passed its scrub test. Fill in all the ** transient stuff.
*/ p->RIOHosts[p->RIONumHosts].Slot = 0; p->RIOHosts[p->RIONumHosts].Ivec = Ivec + 32; p->RIOHosts[p->RIONumHosts].Type = RIO_PCI; p->RIOHosts[p->RIONumHosts].Copy = rio_pcicopy; p->RIOHosts[p->RIONumHosts].PaddrP = Paddr; p->RIOHosts[p->RIONumHosts].Caddr = Caddr; p->RIOHosts[p->RIONumHosts].CardP = (struct DpRam *)Caddr; p->RIOHosts[p->RIONumHosts].Mode = Mode; #if 0 WBYTE(p->RIOHosts[p->RIONumHosts].Control, BOOT_FROM_RAM | EXTERNAL_BUS_OFF | p->RIOHosts[p->RIONumHosts].Mode | INTERRUPT_DISABLE ); WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt,0xff); WBYTE(p->RIOHosts[p->RIONumHosts].Control, BOOT_FROM_RAM | EXTERNAL_BUS_OFF | p->RIOHosts[p->RIONumHosts].Mode | INTERRUPT_DISABLE ); WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt,0xff); #else WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt, 0xff); #endif p->RIOHosts[p->RIONumHosts].UniqueNum = ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[0])&0xFF)<<0)| ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[1])&0xFF)<<8)| ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[2])&0xFF)<<16)| ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[3])&0xFF)<<24); rio_dprintk (RIO_DEBUG_INIT, "Unique no 0x%x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum); p->RIOLastPCISearch = RIO_SUCCESS; p->RIONumHosts++; } } slot++; } if ( slot >= MAX_PCI_SLOT ) { rio_dprintk (RIO_DEBUG_INIT, "All %d PCI slots have tested for RIO cards !!!\n", MAX_PCI_SLOT); } /* ** I don't think we want to do this anymore ** if (!p->RIOLastPCISearch == RIO_FAIL ) { p->RIOFailed++; } ** */ } #ifdef FUTURE_RELEASE void riohalt( void ) { int host; for ( host=0; host<p->RIONumHosts; host++ ) { rio_dprintk (RIO_DEBUG_INIT, "Stop host %d\n", host); (void)RIOBoardTest( p->RIOHosts[host].PaddrP, p->RIOHosts[host].Caddr, p->RIOHosts[host].Type,p->RIOHosts[host].Slot ); } } #endif #endif static uchar val[] = { #ifdef VERY_LONG_TEST 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0xa5, 0xff, 0x5a, 0x00, 0xff, 0xc9, 0x36, #endif 0xff, 0x00, 0x00 }; #define TEST_END sizeof(val) /* ** RAM test a board. ** Nothing too complicated, just enough to check it out. */ int RIOBoardTest(paddr, caddr, type, slot) paddr_t paddr; caddr_t caddr; uchar type; int slot; { struct DpRam *DpRam = (struct DpRam *)caddr; char *ram[4]; int size[4]; int op, bank; int nbanks; rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Reset host type=%d, DpRam=0x%x, slot=%d\n", type,(int)DpRam, slot); RIOHostReset(type, DpRam, slot); /* ** Scrub the memory. This comes in several banks: ** DPsram1 - 7000h bytes ** DPsram2 - 200h bytes ** DPsram3 - 7000h bytes ** scratch - 1000h bytes */ rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Setup ram/size arrays\n"); size[0] = DP_SRAM1_SIZE; size[1] = DP_SRAM2_SIZE; size[2] = DP_SRAM3_SIZE; size[3] = DP_SCRATCH_SIZE; ram[0] = (char *)&DpRam->DpSram1[0]; ram[1] = (char *)&DpRam->DpSram2[0]; ram[2] = (char *)&DpRam->DpSram3[0]; nbanks = (type == RIO_PCI) ? 3 : 4; if (nbanks == 4) ram[3] = (char *)&DpRam->DpScratch[0]; if (nbanks == 3) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Memory: 0x%x(0x%x), 0x%x(0x%x), 0x%x(0x%x)\n", (int)ram[0], size[0], (int)ram[1], size[1], (int)ram[2], size[2]); } else { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: 0x%x(0x%x), 0x%x(0x%x), 0x%x(0x%x), 0x%x(0x%x)\n", (int)ram[0], size[0], (int)ram[1], size[1], (int)ram[2], size[2], (int)ram[3], size[3]); } /* ** This scrub operation will test for crosstalk between ** banks. TEST_END is a magic number, and relates to the offset ** within the 'val' array used by Scrub. 
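** Each pass writes val[op] into every bank in turn; the next pass ** verifies that val[op-1] survived before overwriting it, so any ** crosstalk between the banks shows up as a mismatch on a later pass.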
*/ for (op=0; op<TEST_END; op++) { for (bank=0; bank<nbanks; bank++) { if (RIOScrub(op, (BYTE *)ram[bank], size[bank]) == RIO_FAIL) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: RIOScrub bank %d, op %d failed\n", bank, op); return RIO_FAIL; } } } rio_dprintk (RIO_DEBUG_INIT, "Test completed\n"); return RIO_SUCCESS; } /* ** Scrub an area of RAM. ** Define PRETEST and POSTTEST for a more thorough checking of the ** state of the memory. ** Call with op set to an index into the above 'val' array to determine ** which value will be written into memory. ** Calling with op set to zero means that the RAM will not be read and checked ** before it is written. ** Call with op non-zero, and the RAM will be read and compared with val[op-1] ** to check that the data from the previous phase was retained. */ static int RIOScrub(op, ram, size) int op; BYTE * ram; int size; { int off; unsigned char oldbyte; unsigned char newbyte; unsigned char invbyte; unsigned short oldword; unsigned short newword; unsigned short invword; unsigned short swapword; if (op) { oldbyte = val[op-1]; oldword = oldbyte | (oldbyte<<8); } else oldbyte = oldword = 0; /* Tell the compiler we've initialized them. */ newbyte = val[op]; newword = newbyte | (newbyte<<8); invbyte = ~newbyte; invword = invbyte | (invbyte<<8); /* ** Check that the RAM contains the value that should have been left there ** by the previous test (not applicable for pass zero) */ if (op) { for (off=0; off<size; off++) { if (RBYTE(ram[off]) != oldbyte) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Byte Pre Check 1: BYTE at offset 0x%x should have been=%x, was=%x\n", off, oldbyte, RBYTE(ram[off])); return RIO_FAIL; } } for (off=0; off<size; off+=2) { if (*(ushort *)&ram[off] != oldword) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Pre Check: WORD at offset 0x%x should have been=%x, was=%x\n",off,oldword,*(ushort *)&ram[off]); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Pre Check: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, RBYTE(ram[off]), off+1, RBYTE(ram[off+1])); return RIO_FAIL; } } } /* ** Now write the INVERSE of the test data into every location, using ** BYTE write operations, first checking before each byte is written ** that the location contains the old value still, and checking after ** the write that the location contains the data specified - this is ** the BYTE read/write test. */ for (off=0; off<size; off++) { if (op && (RBYTE(ram[off]) != oldbyte)) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Byte Pre Check 2: BYTE at offset 0x%x should have been=%x, was=%x\n", off, oldbyte, RBYTE(ram[off])); return RIO_FAIL; } WBYTE(ram[off],invbyte); if (RBYTE(ram[off]) != invbyte) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Byte Inv Check: BYTE at offset 0x%x should have been=%x, was=%x\n", off, invbyte, RBYTE(ram[off])); return RIO_FAIL; } } /* ** now, use WORD operations to write the test value into every location, ** check as before that the location contains the previous test value ** before overwriting, and that it contains the data value written ** afterwards. ** This is the WORD operation test.
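** (A location that fails here after passing the byte test above most ** likely points at a fault in the 16-bit data path to the DPRAM.)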
*/ for (off=0; off<size; off+=2) { if (*(ushort *)&ram[off] != invword) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Inv Check: WORD at offset 0x%x should have been=%x, was=%x\n", off, invword, *(ushort *)&ram[off]); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Inv Check: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, RBYTE(ram[off]), off+1, RBYTE(ram[off+1])); return RIO_FAIL; } *(ushort *)&ram[off] = newword; if ( *(ushort *)&ram[off] != newword ) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 1: WORD at offset 0x%x should have been=%x, was=%x\n", off, newword, *(ushort *)&ram[off]); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 1: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, RBYTE(ram[off]), off+1, RBYTE(ram[off+1])); return RIO_FAIL; } } /* ** now run through the block of memory again, first in byte mode ** then in word mode, and check that all the locations contain the ** required test data. */ for (off=0; off<size; off++) { if (RBYTE(ram[off]) != newbyte) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Byte Check: BYTE at offset 0x%x should have been=%x, was=%x\n", off, newbyte, RBYTE(ram[off])); return RIO_FAIL; } } for (off=0; off<size; off+=2) { if ( *(ushort *)&ram[off] != newword ) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 2: WORD at offset 0x%x should have been=%x, was=%x\n", off, newword, *(ushort *)&ram[off]); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 2: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, RBYTE(ram[off]), off+1, RBYTE(ram[off+1])); return RIO_FAIL; } } /* ** time to check out byte swapping errors */ swapword = invbyte | (newbyte << 8); for (off=0; off<size; off+=2) { WBYTE(ram[off],invbyte); WBYTE(ram[off+1],newbyte); } for ( off=0; off<size; off+=2 ) { if (*(ushort *)&ram[off] != swapword) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 1: WORD at offset 0x%x should have been=%x, was=%x\n", off, swapword, *((ushort *)&ram[off])); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 1: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, RBYTE(ram[off]), off+1, RBYTE(ram[off+1])); return RIO_FAIL; } *((ushort *)&ram[off]) = ~swapword; } for (off=0; off<size; off+=2) { if (RBYTE(ram[off]) != newbyte) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 2: BYTE at offset 0x%x should have been=%x, was=%x\n", off, newbyte, RBYTE(ram[off])); return RIO_FAIL; } if (RBYTE(ram[off+1]) != invbyte) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 2: BYTE at offset 0x%x should have been=%x, was=%x\n", off+1, invbyte, RBYTE(ram[off+1])); return RIO_FAIL; } *((ushort *)&ram[off]) = newword; } return RIO_SUCCESS; } /* ** try to ensure that every host is either in polled mode ** or is in interrupt mode. Only allow interrupt mode if ** all hosts can interrupt (why?) ** and force into polled mode if told to. Patch up the ** interrupt vector & salute The Queen when you've done. */ #if 0 static void RIOAllocateInterrupts(p) struct rio_info * p; { int Host; /* ** Easy case - if we have been told to poll, then we poll. */ if (p->mode & POLLED_MODE) { RIOStopInterrupts(p, 0, 0); return; } /* ** check - if any host has been set to polled mode, then all must be. 
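** (non-AT hosts configured for polling are caught by the first loop ** below; AT hosts, which report the lack of a free vector as Ivec == 32, ** are handled by the second loop.)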
*/ for (Host=0; Host<p->RIONumHosts; Host++) { if ( (p->RIOHosts[Host].Type != RIO_AT) && (p->RIOHosts[Host].Ivec == POLLED) ) { RIOStopInterrupts(p, 1, Host ); return; } } for (Host=0; Host<p->RIONumHosts; Host++) { if (p->RIOHosts[Host].Type == RIO_AT) { if ( (p->RIOHosts[Host].Ivec - 32) == 0) { RIOStopInterrupts(p, 2, Host ); return; } } } } /* ** something has decided that we can't be doing with these ** new-fangled interrupt thingies. Set everything up to just ** poll. */ static void RIOStopInterrupts(p, Reason, Host) struct rio_info * p; int Reason; int Host; { #ifdef FUTURE_RELEASE switch (Reason) { case 0: /* forced into polling by rio_polled */ break; case 1: /* SCU has set 'Host' into polled mode */ break; case 2: /* there aren't enough interrupt vectors for 'Host' */ break; } #endif for (Host=0; Host<p->RIONumHosts; Host++ ) { struct Host *HostP = &p->RIOHosts[Host]; switch (HostP->Type) { case RIO_AT: /* ** The AT host has its interrupts disabled by clearing the ** int_enable bit. */ HostP->Mode &= ~INTERRUPT_ENABLE; HostP->Ivec = POLLED; break; #ifdef FUTURE_RELEASE case RIO_EISA: /* ** The EISA host has its interrupts disabled by setting the ** Ivec to zero */ HostP->Ivec = POLLED; break; #endif case RIO_PCI: /* ** The PCI host has its interrupts disabled by clearing the ** int_enable bit, like a regular host card. */ HostP->Mode &= ~RIO_PCI_INT_ENABLE; HostP->Ivec = POLLED; break; #ifdef FUTURE_RELEASE case RIO_MCA: /* ** There's always one, isn't there? ** The MCA host card cannot have its interrupts disabled. */ RIOPatchVec(HostP); break; #endif } } } /* ** This function is called at init time to set up the data structures. */ void RIOAllocDataStructs(p) struct rio_info * p; { int port, host, tm; p->RIOPortp = (struct Port *)sysbrk(RIO_PORTS * sizeof(struct Port)); if (!p->RIOPortp) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: No memory for port structures\n"); p->RIOFailed++; return; } bzero( p->RIOPortp, sizeof(struct Port) * RIO_PORTS ); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: allocated and cleared memory for port structs\n"); rio_dprintk (RIO_DEBUG_INIT, "First RIO port struct @0x%x, size=0x%x bytes\n", (int)p->RIOPortp, sizeof(struct Port)); for( port=0; port<RIO_PORTS; port++ ) { p->RIOPortp[port].PortNum = port; p->RIOPortp[port].TtyP = &p->channel[port]; sreset (p->RIOPortp[port].InUse); /* Let the first guy use it */ p->RIOPortp[port].portSem = -1; /* Let the first guy take it */ p->RIOPortp[port].ParamSem = -1; /* Let the first guy take it */ p->RIOPortp[port].timeout_id = 0; /* Let the first guy take it */ } p->RIOHosts = (struct Host *)sysbrk(RIO_HOSTS * sizeof(struct Host)); if (!p->RIOHosts) { rio_dprintk (RIO_DEBUG_INIT, "RIO-init: No memory for host structures\n"); p->RIOFailed++; return; } bzero(p->RIOHosts, sizeof(struct Host)*RIO_HOSTS); rio_dprintk (RIO_DEBUG_INIT, "RIO-init: allocated and cleared memory for host structs\n"); rio_dprintk (RIO_DEBUG_INIT, "First RIO host struct @0x%x, size=0x%x bytes\n", (int)p->RIOHosts, sizeof(struct Host)); for( host=0; host<RIO_HOSTS; host++ ) { spin_lock_init (&p->RIOHosts[host].HostLock); p->RIOHosts[host].timeout_id = 0; /* Let the first guy take it */ } /* ** check that the buffer size is valid, round down to the next power of ** two if necessary; if the result is zero, then, hey, no double buffers. */ for ( tm = 1; tm && tm <= p->RIOConf.BufferSize; tm <<= 1 ) ; tm >>= 1; p->RIOBufferSize = tm; p->RIOBufferMask = tm ?
tm - 1 : 0; } /* ** this function gets called whenever the data structures need to be ** re-setup, for example, after a riohalt (why did I ever invent it?) */ void RIOSetupDataStructs(p) struct rio_info * p; { int host, entry, rup; for ( host=0; host<RIO_HOSTS; host++ ) { struct Host *HostP = &p->RIOHosts[host]; for ( entry=0; entry<LINKS_PER_UNIT; entry++ ) { HostP->Topology[entry].Unit = ROUTE_DISCONNECT; HostP->Topology[entry].Link = NO_LINK; } bcopy("HOST X", HostP->Name, 7); HostP->Name[5] = '1'+host; for (rup=0; rup<(MAX_RUP + LINKS_PER_UNIT); rup++) { if (rup < MAX_RUP) { for (entry=0; entry<LINKS_PER_UNIT; entry++ ) { HostP->Mapping[rup].Topology[entry].Unit = ROUTE_DISCONNECT; HostP->Mapping[rup].Topology[entry].Link = NO_LINK; } RIODefaultName(p, HostP, rup); } spin_lock_init(&HostP->UnixRups[rup].RupLock); } } } #endif int RIODefaultName(p, HostP, UnitId) struct rio_info * p; struct Host * HostP; uint UnitId; { #ifdef CHECK CheckHost( Host ); CheckUnitId( UnitId ); #endif bcopy("UNKNOWN RTA X-XX",HostP->Mapping[UnitId].Name,17); HostP->Mapping[UnitId].Name[12]='1'+(HostP-p->RIOHosts); if ((UnitId+1) > 9) { HostP->Mapping[UnitId].Name[14]='0'+((UnitId+1)/10); HostP->Mapping[UnitId].Name[15]='0'+((UnitId+1)%10); } else { HostP->Mapping[UnitId].Name[14]='1'+UnitId; HostP->Mapping[UnitId].Name[15]=0; } return 0; } #define RIO_RELEASE "Linux" #define RELEASE_ID "1.0" #if 0 static int RIOReport(p) struct rio_info * p; { char * RIORelease = RIO_RELEASE; char * RIORelID = RELEASE_ID; int host; rio_dprintk (RIO_DEBUG_INIT, "RIO : Release: %s ID: %s\n", RIORelease, RIORelID); if ( p->RIONumHosts==0 ) { rio_dprintk (RIO_DEBUG_INIT, "\nNo Hosts configured\n"); return(0); } for ( host=0; host < p->RIONumHosts; host++ ) { struct Host *HostP = &p->RIOHosts[host]; switch ( HostP->Type ) { case RIO_AT: rio_dprintk (RIO_DEBUG_INIT, "AT BUS : found the card at 0x%x\n", HostP->PaddrP); } } return 0; } #endif static struct rioVersion stVersion; struct rioVersion * RIOVersid(void) { strlcpy(stVersion.version, "RIO driver for linux V1.0", sizeof(stVersion.version)); strlcpy(stVersion.buildDate, __DATE__, sizeof(stVersion.buildDate)); return &stVersion; } #if 0 int RIOMapin(paddr, size, vaddr) paddr_t paddr; int size; caddr_t * vaddr; { *vaddr = (caddr_t)permap( (long)paddr, size); return ((int)*vaddr); } void RIOMapout(paddr, size, vaddr) paddr_t paddr; long size; caddr_t vaddr; { } #endif void RIOHostReset(Type, DpRamP, Slot) uint Type; volatile struct DpRam *DpRamP; uint Slot; { /* ** Reset the Tpu */ rio_dprintk (RIO_DEBUG_INIT, "RIOHostReset: type 0x%x", Type); switch ( Type ) { case RIO_AT: rio_dprintk (RIO_DEBUG_INIT, " (RIO_AT)\n"); WBYTE(DpRamP->DpControl, BOOT_FROM_RAM | EXTERNAL_BUS_OFF | INTERRUPT_DISABLE | BYTE_OPERATION | SLOW_LINKS | SLOW_AT_BUS); WBYTE(DpRamP->DpResetTpu, 0xFF); udelay(3); rio_dprintk (RIO_DEBUG_INIT, "RIOHostReset: Don't know if it worked. Try reset again\n"); WBYTE(DpRamP->DpControl, BOOT_FROM_RAM | EXTERNAL_BUS_OFF | INTERRUPT_DISABLE | BYTE_OPERATION | SLOW_LINKS | SLOW_AT_BUS); WBYTE(DpRamP->DpResetTpu, 0xFF); udelay(3); break; #ifdef FUTURE_RELEASE case RIO_EISA: /* ** Bet this doesn't work! 
*/ OUTBZ( Slot, EISA_CONTROL_PORT, EISA_TP_RUN | EISA_TP_BUS_DISABLE | EISA_TP_SLOW_LINKS | EISA_TP_BOOT_FROM_RAM ); OUTBZ( Slot, EISA_CONTROL_PORT, EISA_TP_RESET | EISA_TP_BUS_DISABLE | EISA_TP_SLOW_LINKS | EISA_TP_BOOT_FROM_RAM ); suspend( 3 ); OUTBZ( Slot, EISA_CONTROL_PORT, EISA_TP_RUN | EISA_TP_BUS_DISABLE | EISA_TP_SLOW_LINKS | EISA_TP_BOOT_FROM_RAM ); break; case RIO_MCA: WBYTE(DpRamP->DpControl , McaTpBootFromRam | McaTpBusDisable ); WBYTE(DpRamP->DpResetTpu , 0xFF ); suspend( 3 ); WBYTE(DpRamP->DpControl , McaTpBootFromRam | McaTpBusDisable ); WBYTE(DpRamP->DpResetTpu , 0xFF ); suspend( 3 ); break; #endif case RIO_PCI: rio_dprintk (RIO_DEBUG_INIT, " (RIO_PCI)\n"); DpRamP->DpControl = RIO_PCI_BOOT_FROM_RAM; DpRamP->DpResetInt = 0xFF; DpRamP->DpResetTpu = 0xFF; udelay(100); /* for (i=0; i<6000; i++); */ /* suspend( 3 ); */ break; default: rio_dprintk (RIO_DEBUG_INIT, " (UNKNOWN)\n"); #ifdef FUTURE_RELEASE Rprintf(RIOMesgNoSupport,Type,DpRamP,Slot); #endif break; } return; }
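/* ** Illustrative sketch only (not part of the original driver): how a caller ** would walk the 'val' pattern table with RIOScrub(), the same way ** RIOBoardTest() does above. The function name is hypothetical, and ** 'base'/'len' are assumed to describe one DPRAM bank that has already been ** mapped in. */ static int rio_scrub_one_bank(BYTE *base, int len) { int op; /* op == 0 writes val[0] blind; op > 0 first checks that val[op-1] survived */ for (op = 0; op < TEST_END; op++) { if (RIOScrub(op, base, len) == RIO_FAIL) return RIO_FAIL; } return RIO_SUCCESS; }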
gpl-2.0
KylinUI/android_kernel_motorola_msm8960dt-common
sound/usb/mixer.c
8
64674
/* * (Tentative) USB Audio Driver for ALSA * * Mixer control part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Many codes borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * TODOs, for both the mixer and the streaming interfaces: * * - support for UAC2 effect units * - support for graphical equalizers * - RANGE and MEM set commands (UAC2) * - RANGE and MEM interrupt dispatchers (UAC2) * - audio channel clustering (UAC2) * - audio sample rate converter units (UAC2) * - proper handling of clock multipliers (UAC2) * - dispatch clock change notifications (UAC2) * - stop PCM streams which use a clock that became invalid * - stop PCM streams which use a clock selector that has changed * - parse available sample rates again when clock sources changed */ #include <linux/bitops.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <sound/core.h> #include <sound/control.h> #include <sound/hwdep.h> #include <sound/info.h> #include <sound/tlv.h> #include "usbaudio.h" #include "mixer.h" #include "helper.h" #include "mixer_quirks.h" #include "power.h" #define MAX_ID_ELEMS 256 struct usb_audio_term { int id; int type; int channels; unsigned int chconfig; int name; }; struct usbmix_name_map; struct mixer_build { struct snd_usb_audio *chip; struct usb_mixer_interface *mixer; unsigned char *buffer; unsigned int buflen; DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); struct usb_audio_term oterm; const struct usbmix_name_map *map; const struct usbmix_selector_map *selector_map; }; /*E-mu 0202/0404/0204 eXtension Unit(XU) control*/ enum { USB_XU_CLOCK_RATE = 0xe301, USB_XU_CLOCK_SOURCE = 0xe302, USB_XU_DIGITAL_IO_STATUS = 0xe303, USB_XU_DEVICE_OPTIONS = 0xe304, USB_XU_DIRECT_MONITORING = 0xe305, USB_XU_METERING = 0xe306 }; enum { USB_XU_CLOCK_SOURCE_SELECTOR = 0x02, /* clock source*/ USB_XU_CLOCK_RATE_SELECTOR = 0x03, /* clock rate */ USB_XU_DIGITAL_FORMAT_SELECTOR = 0x01, /* the spdif format */ USB_XU_SOFT_LIMIT_SELECTOR = 0x03 /* soft limiter */ }; /* * manual mapping of mixer names * if the mixer topology is too complicated and the parsed names are * ambiguous, add the entries in usbmixer_maps.c. 
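* Each map entry is matched on the unit id (and, when given, on the * control selector) by find_map() below.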
*/ #include "mixer_maps.c" static const struct usbmix_name_map * find_map(struct mixer_build *state, int unitid, int control) { const struct usbmix_name_map *p = state->map; if (!p) return NULL; for (p = state->map; p->id; p++) { if (p->id == unitid && (!control || !p->control || control == p->control)) return p; } return NULL; } /* get the mapped name if the unit matches */ static int check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen) { if (!p || !p->name) return 0; buflen--; return strlcpy(buf, p->name, buflen); } /* check whether the control should be ignored */ static inline int check_ignored_ctl(const struct usbmix_name_map *p) { if (!p || p->name || p->dB) return 0; return 1; } /* dB mapping */ static inline void check_mapped_dB(const struct usbmix_name_map *p, struct usb_mixer_elem_info *cval) { if (p && p->dB) { cval->dBmin = p->dB->min; cval->dBmax = p->dB->max; cval->initialized = 1; } } /* get the mapped selector source name */ static int check_mapped_selector_name(struct mixer_build *state, int unitid, int index, char *buf, int buflen) { const struct usbmix_selector_map *p; if (! state->selector_map) return 0; for (p = state->selector_map; p->id; p++) { if (p->id == unitid && index < p->count) return strlcpy(buf, p->names[index], buflen); } return 0; } /* * find an audio control unit with the given unit id */ static void *find_audio_control_unit(struct mixer_build *state, unsigned char unit) { /* we just parse the header */ struct uac_feature_unit_descriptor *hdr = NULL; while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr, USB_DT_CS_INTERFACE)) != NULL) { if (hdr->bLength >= 4 && hdr->bDescriptorSubtype >= UAC_INPUT_TERMINAL && hdr->bDescriptorSubtype <= UAC2_SAMPLE_RATE_CONVERTER && hdr->bUnitID == unit) return hdr; } return NULL; } /* * copy a string with the given id */ static int snd_usb_copy_string_desc(struct mixer_build *state, int index, char *buf, int maxlen) { int len = usb_string(state->chip->dev, index, buf, maxlen - 1); buf[len] = 0; return len; } /* * convert from the byte/word on usb descriptor to the zero-based integer */ static int convert_signed_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_U8: val &= 0xff; break; case USB_MIXER_S8: val &= 0xff; if (val >= 0x80) val -= 0x100; break; case USB_MIXER_U16: val &= 0xffff; break; case USB_MIXER_S16: val &= 0xffff; if (val >= 0x8000) val -= 0x10000; break; } return val; } /* * convert from the zero-based int to the byte/word for usb descriptor */ static int convert_bytes_value(struct usb_mixer_elem_info *cval, int val) { switch (cval->val_type) { case USB_MIXER_BOOLEAN: return !!val; case USB_MIXER_INV_BOOLEAN: return !val; case USB_MIXER_S8: case USB_MIXER_U8: return val & 0xff; case USB_MIXER_S16: case USB_MIXER_U16: return val & 0xffff; } return 0; /* not reached */ } static int get_relative_value(struct usb_mixer_elem_info *cval, int val) { if (! cval->res) cval->res = 1; if (val < cval->min) return 0; else if (val >= cval->max) return (cval->max - cval->min + cval->res - 1) / cval->res; else return (val - cval->min) / cval->res; } static int get_abs_value(struct usb_mixer_elem_info *cval, int val) { if (val < 0) return cval->min; if (! 
cval->res) cval->res = 1; val *= cval->res; val += cval->min; if (val > cval->max) return cval->max; return val; } /* * retrieve a mixer value */ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->mixer->chip; unsigned char buf[2]; int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; int timeout = 10; int idx = 0, err; err = snd_usb_autoresume(cval->mixer->chip); if (err < 0) return -EIO; down_read(&chip->shutdown_rwsem); while (timeout-- > 0) { if (chip->shutdown) break; idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, buf, val_len) >= val_len) { *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len)); err = 0; goto out; } } snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", request, validx, idx, cval->val_type); err = -EINVAL; out: up_read(&chip->shutdown_rwsem); snd_usb_autosuspend(cval->mixer->chip); return err; } static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { struct snd_usb_audio *chip = cval->mixer->chip; unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */ unsigned char *val; int idx = 0, ret, size; __u8 bRequest; if (request == UAC_GET_CUR) { bRequest = UAC2_CS_CUR; size = sizeof(__u16); } else { bRequest = UAC2_CS_RANGE; size = sizeof(buf); } memset(buf, 0, sizeof(buf)); ret = snd_usb_autoresume(chip) ? -EIO : 0; if (ret) goto error; down_read(&chip->shutdown_rwsem); if (chip->shutdown) ret = -ENODEV; else { idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, validx, idx, buf, size); } up_read(&chip->shutdown_rwsem); snd_usb_autosuspend(chip); if (ret < 0) { error: snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", request, validx, idx, cval->val_type); return ret; } /* FIXME: how should we handle multiple triplets here? */ switch (request) { case UAC_GET_CUR: val = buf; break; case UAC_GET_MIN: val = buf + sizeof(__u16); break; case UAC_GET_MAX: val = buf + sizeof(__u16) * 2; break; case UAC_GET_RES: val = buf + sizeof(__u16) * 3; break; default: return -EINVAL; } *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16))); return 0; } static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret) { return (cval->mixer->protocol == UAC_VERSION_1) ? 
get_ctl_value_v1(cval, request, validx, value_ret) : get_ctl_value_v2(cval, request, validx, value_ret); } static int get_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int *value) { return get_ctl_value(cval, UAC_GET_CUR, validx, value); } /* channel = 0: master, 1 = first channel */ static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval, int channel, int *value) { return get_ctl_value(cval, UAC_GET_CUR, (cval->control << 8) | channel, value); } static int get_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int *value) { int err; if (cval->cached & (1 << channel)) { *value = cval->cache_val[index]; return 0; } err = get_cur_mix_raw(cval, channel, value); if (err < 0) { if (!cval->mixer->ignore_ctl_error) snd_printd(KERN_ERR "cannot get current value for control %d ch %d: err = %d\n", cval->control, channel, err); return err; } cval->cached |= 1 << channel; cval->cache_val[index] = *value; return 0; } /* * set a mixer value */ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set) { struct snd_usb_audio *chip = cval->mixer->chip; unsigned char buf[2]; int idx = 0, val_len, err, timeout = 10; if (cval->mixer->protocol == UAC_VERSION_1) { val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1; } else { /* UAC_VERSION_2 */ /* audio class v2 controls are always 2 bytes in size */ val_len = sizeof(__u16); /* FIXME */ if (request != UAC_SET_CUR) { snd_printdd(KERN_WARNING "RANGE setting not yet supported\n"); return -EINVAL; } request = UAC2_CS_CUR; } value_set = convert_bytes_value(cval, value_set); buf[0] = value_set & 0xff; buf[1] = (value_set >> 8) & 0xff; err = snd_usb_autoresume(chip); if (err < 0) return -EIO; down_read(&chip->shutdown_rwsem); while (timeout-- > 0) { if (chip->shutdown) break; idx = snd_usb_ctrl_intf(chip) | (cval->id << 8); if (snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0), request, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, validx, idx, buf, val_len) >= 0) { err = 0; goto out; } } snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n", request, validx, idx, cval->val_type, buf[0], buf[1]); err = -EINVAL; out: up_read(&chip->shutdown_rwsem); snd_usb_autosuspend(chip); return err; } static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value) { return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value); } static int set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel, int index, int value) { int err; unsigned int read_only = (channel == 0) ? cval->master_readonly : cval->ch_readonly & (1 << (channel - 1)); if (read_only) { snd_printdd(KERN_INFO "%s(): channel %d of control %d is read_only\n", __func__, channel, cval->control); return 0; } err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, (cval->control << 8) | channel, value); if (err < 0) return err; cval->cached |= 1 << channel; cval->cache_val[index] = value; return 0; } /* * TLV callback for mixer volume controls */ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *_tlv) { struct usb_mixer_elem_info *cval = kcontrol->private_data; DECLARE_TLV_DB_MINMAX(scale, 0, 0); if (size < sizeof(scale)) return -ENOMEM; scale[2] = cval->dBmin; scale[3] = cval->dBmax; if (copy_to_user(_tlv, scale, sizeof(scale))) return -EFAULT; return 0; } /* * parser routines begin here... 
*/ static int parse_audio_unit(struct mixer_build *state, int unitid); /* * check if the input/output channel routing is enabled on the given bitmap. * used for mixer unit parser */ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_outs) { int idx = ich * num_outs + och; return bmap[idx >> 3] & (0x80 >> (idx & 7)); } /* * add an alsa control element * search and increment the index until an empty slot is found. * * if failed, give up and free the control instance. */ int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer, struct snd_kcontrol *kctl) { struct usb_mixer_elem_info *cval = kctl->private_data; int err; while (snd_ctl_find_id(mixer->chip->card, &kctl->id)) kctl->id.index++; if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) { snd_printd(KERN_ERR "cannot add control (err = %d)\n", err); return err; } cval->elem_id = &kctl->id; cval->next_id_elem = mixer->id_elems[cval->id]; mixer->id_elems[cval->id] = cval; return 0; } /* * get a terminal name string */ static struct iterm_name_combo { int type; char *name; } iterm_names[] = { { 0x0300, "Output" }, { 0x0301, "Speaker" }, { 0x0302, "Headphone" }, { 0x0303, "HMD Audio" }, { 0x0304, "Desktop Speaker" }, { 0x0305, "Room Speaker" }, { 0x0306, "Com Speaker" }, { 0x0307, "LFE" }, { 0x0600, "External In" }, { 0x0601, "Analog In" }, { 0x0602, "Digital In" }, { 0x0603, "Line" }, { 0x0604, "Legacy In" }, { 0x0605, "IEC958 In" }, { 0x0606, "1394 DA Stream" }, { 0x0607, "1394 DV Stream" }, { 0x0700, "Embedded" }, { 0x0701, "Noise Source" }, { 0x0702, "Equalization Noise" }, { 0x0703, "CD" }, { 0x0704, "DAT" }, { 0x0705, "DCC" }, { 0x0706, "MiniDisk" }, { 0x0707, "Analog Tape" }, { 0x0708, "Phonograph" }, { 0x0709, "VCR Audio" }, { 0x070a, "Video Disk Audio" }, { 0x070b, "DVD Audio" }, { 0x070c, "TV Tuner Audio" }, { 0x070d, "Satellite Rec Audio" }, { 0x070e, "Cable Tuner Audio" }, { 0x070f, "DSS Audio" }, { 0x0710, "Radio Receiver" }, { 0x0711, "Radio Transmitter" }, { 0x0712, "Multi-Track Recorder" }, { 0x0713, "Synthesizer" }, { 0 }, }; static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm, unsigned char *name, int maxlen, int term_only) { struct iterm_name_combo *names; if (iterm->name) return snd_usb_copy_string_desc(state, iterm->name, name, maxlen); /* virtual type - not a real terminal */ if (iterm->type >> 16) { if (term_only) return 0; switch (iterm->type >> 16) { case UAC_SELECTOR_UNIT: strcpy(name, "Selector"); return 8; case UAC1_PROCESSING_UNIT: strcpy(name, "Process Unit"); return 12; case UAC1_EXTENSION_UNIT: strcpy(name, "Ext Unit"); return 8; case UAC_MIXER_UNIT: strcpy(name, "Mixer"); return 5; default: return sprintf(name, "Unit %d", iterm->id); } } switch (iterm->type & 0xff00) { case 0x0100: strcpy(name, "PCM"); return 3; case 0x0200: strcpy(name, "Mic"); return 3; case 0x0400: strcpy(name, "Headset"); return 7; case 0x0500: strcpy(name, "Phone"); return 5; } for (names = iterm_names; names->type; names++) if (names->type == iterm->type) { strcpy(name, names->name); return strlen(names->name); } return 0; } /* * parse the source unit recursively until it reaches to a terminal * or a branched unit. 
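* Selector and clock-selector units recurse into their first source * for the channel info, feature units follow bSourceID, and mixer, * processing and clock-source units terminate the walk with a * "virtual" terminal type built as (bDescriptorSubtype << 16).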
*/ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term) { int err; void *p1; memset(term, 0, sizeof(*term)); while ((p1 = find_audio_control_unit(state, id)) != NULL) { unsigned char *hdr = p1; term->id = id; switch (hdr[2]) { case UAC_INPUT_TERMINAL: if (state->mixer->protocol == UAC_VERSION_1) { struct uac_input_terminal_descriptor *d = p1; term->type = le16_to_cpu(d->wTerminalType); term->channels = d->bNrChannels; term->chconfig = le16_to_cpu(d->wChannelConfig); term->name = d->iTerminal; } else { /* UAC_VERSION_2 */ struct uac2_input_terminal_descriptor *d = p1; term->type = le16_to_cpu(d->wTerminalType); term->channels = d->bNrChannels; term->chconfig = le32_to_cpu(d->bmChannelConfig); term->name = d->iTerminal; /* call recursively to get the clock selectors */ err = check_input_term(state, d->bCSourceID, term); if (err < 0) return err; } return 0; case UAC_FEATURE_UNIT: { /* the header is the same for v1 and v2 */ struct uac_feature_unit_descriptor *d = p1; id = d->bSourceID; break; /* continue to parse */ } case UAC_MIXER_UNIT: { struct uac_mixer_unit_descriptor *d = p1; term->type = d->bDescriptorSubtype << 16; /* virtual type */ term->channels = uac_mixer_unit_bNrChannels(d); term->chconfig = uac_mixer_unit_wChannelConfig(d, state->mixer->protocol); term->name = uac_mixer_unit_iMixer(d); return 0; } case UAC_SELECTOR_UNIT: case UAC2_CLOCK_SELECTOR: { struct uac_selector_unit_descriptor *d = p1; /* call recursively to retrieve the channel info */ err = check_input_term(state, d->baSourceID[0], term); if (err < 0) return err; term->type = d->bDescriptorSubtype << 16; /* virtual type */ term->id = id; term->name = uac_selector_unit_iSelector(d); return 0; } case UAC1_PROCESSING_UNIT: case UAC1_EXTENSION_UNIT: /* UAC2_PROCESSING_UNIT_V2 */ /* UAC2_EFFECT_UNIT */ case UAC2_EXTENSION_UNIT_V2: { struct uac_processing_unit_descriptor *d = p1; if (state->mixer->protocol == UAC_VERSION_2 && hdr[2] == UAC2_EFFECT_UNIT) { /* UAC2/UAC1 unit IDs overlap here in an * incompatible way. Ignore this unit for now. */ return 0; } if (d->bNrInPins) { id = d->baSourceID[0]; break; /* continue to parse */ } term->type = d->bDescriptorSubtype << 16; /* virtual type */ term->channels = uac_processing_unit_bNrChannels(d); term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol); term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol); return 0; } case UAC2_CLOCK_SOURCE: { struct uac_clock_source_descriptor *d = p1; term->type = d->bDescriptorSubtype << 16; /* virtual type */ term->id = id; term->name = d->iClockSource; return 0; } default: return -ENODEV; } } return -ENODEV; } /* * Feature Unit */ /* feature unit control information */ struct usb_feature_control_info { const char *name; unsigned int type; /* control type (mute, volume, etc.)
*/ }; static struct usb_feature_control_info audio_feature_info[] = { { "Mute", USB_MIXER_INV_BOOLEAN }, { "Volume", USB_MIXER_S16 }, { "Tone Control - Bass", USB_MIXER_S8 }, { "Tone Control - Mid", USB_MIXER_S8 }, { "Tone Control - Treble", USB_MIXER_S8 }, { "Graphic Equalizer", USB_MIXER_S8 }, /* FIXME: not implemented yet */ { "Auto Gain Control", USB_MIXER_BOOLEAN }, { "Delay Control", USB_MIXER_U16 }, { "Bass Boost", USB_MIXER_BOOLEAN }, { "Loudness", USB_MIXER_BOOLEAN }, /* UAC2 specific */ { "Input Gain Control", USB_MIXER_U16 }, { "Input Gain Pad Control", USB_MIXER_BOOLEAN }, { "Phase Inverter Control", USB_MIXER_BOOLEAN }, }; /* private_free callback */ static void usb_mixer_elem_free(struct snd_kcontrol *kctl) { kfree(kctl->private_data); kctl->private_data = NULL; } /* * interface to ALSA control for feature/mixer units */ /* volume control quirks */ static void volume_control_quirks(struct usb_mixer_elem_info *cval, struct snd_kcontrol *kctl) { switch (cval->mixer->chip->usb_id) { case USB_ID(0x0471, 0x0101): case USB_ID(0x0471, 0x0104): case USB_ID(0x0471, 0x0105): case USB_ID(0x0672, 0x1041): /* quirk for UDA1321/N101. * note that detection between firmware 2.1.1.7 (N101) * and later 2.1.1.21 is not very clear from datasheets. * I hope that the min value is -15360 for newer firmware --jk */ if (!strcmp(kctl->id.name, "PCM Playback Volume") && cval->min == -15616) { snd_printk(KERN_INFO "set volume quirk for UDA1321/N101 chip\n"); cval->max = -256; } break; case USB_ID(0x046d, 0x09a4): if (!strcmp(kctl->id.name, "Mic Capture Volume")) { snd_printk(KERN_INFO "set volume quirk for QuickCam E3500\n"); cval->min = 6080; cval->max = 8768; cval->res = 192; } break; case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ case USB_ID(0x046d, 0x0808): case USB_ID(0x046d, 0x0809): case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ case USB_ID(0x046d, 0x0991): /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384.
* Probably there is some Logitech magic behind this number --fishor */ if (!strcmp(kctl->id.name, "Mic Capture Volume")) { snd_printk(KERN_INFO "set resolution quirk: cval->res = 384\n"); cval->res = 384; } break; } } /* * retrieve the minimum and maximum values for the specified control */ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval, int default_min, struct snd_kcontrol *kctl) { /* for failsafe */ cval->min = default_min; cval->max = cval->min + 1; cval->res = 1; cval->dBmin = cval->dBmax = 0; if (cval->val_type == USB_MIXER_BOOLEAN || cval->val_type == USB_MIXER_INV_BOOLEAN) { cval->initialized = 1; } else { int minchn = 0; if (cval->cmask) { int i; for (i = 0; i < MAX_CHANNELS; i++) if (cval->cmask & (1 << i)) { minchn = i + 1; break; } } if (get_ctl_value(cval, UAC_GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 || get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) { snd_printd(KERN_ERR "%d:%d: cannot get min/max values for control %d (id %d)\n", cval->id, snd_usb_ctrl_intf(cval->mixer->chip), cval->control, cval->id); return -EINVAL; } if (get_ctl_value(cval, UAC_GET_RES, (cval->control << 8) | minchn, &cval->res) < 0) { cval->res = 1; } else { int last_valid_res = cval->res; while (cval->res > 1) { if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES, (cval->control << 8) | minchn, cval->res / 2) < 0) break; cval->res /= 2; } if (get_ctl_value(cval, UAC_GET_RES, (cval->control << 8) | minchn, &cval->res) < 0) cval->res = last_valid_res; } if (cval->res == 0) cval->res = 1; /* Additional checks for the proper resolution * * Some devices report a smaller resolution than they actually * react to. They don't return errors but simply clip * to the lower aligned value. */ if (cval->min + cval->res < cval->max) { int last_valid_res = cval->res; int saved, test, check; get_cur_mix_raw(cval, minchn, &saved); for (;;) { test = saved; if (test < cval->max) test += cval->res; else test -= cval->res; if (test < cval->min || test > cval->max || set_cur_mix_value(cval, minchn, 0, test) || get_cur_mix_raw(cval, minchn, &check)) { cval->res = last_valid_res; break; } if (test == check) break; cval->res *= 2; } set_cur_mix_value(cval, minchn, 0, saved); } cval->initialized = 1; } if (kctl) volume_control_quirks(cval, kctl); /* USB descriptions contain the dB scale in 1/256 dB unit * while ALSA TLV contains it in 1/100 dB unit */ cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / (cval->res); cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / (cval->res); if (cval->dBmin > cval->dBmax) { /* something is wrong; assume it's either from/to 0dB */ if (cval->dBmin < 0) cval->dBmax = 0; else if (cval->dBmin > 0) cval->dBmin = 0; if (cval->dBmin > cval->dBmax) { /* totally crap, return an error */ return -EINVAL; } } return 0; } #define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL) /* get a feature/mixer unit info */ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = kcontrol->private_data; if (cval->val_type == USB_MIXER_BOOLEAN || cval->val_type == USB_MIXER_INV_BOOLEAN) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = cval->channels; if (cval->val_type == USB_MIXER_BOOLEAN || cval->val_type == USB_MIXER_INV_BOOLEAN) { uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; } else { if (!cval->initialized) { get_min_max_with_quirks(cval, 0, kcontrol); if (cval->initialized &&
cval->dBmin >= cval->dBmax) { kcontrol->vd[0].access &= ~(SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK); snd_ctl_notify(cval->mixer->chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kcontrol->id); } } uinfo->value.integer.min = 0; uinfo->value.integer.max = (cval->max - cval->min + cval->res - 1) / cval->res; } return 0; } /* get the current value from feature/mixer unit */ static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int c, cnt, val, err; ucontrol->value.integer.value[0] = cval->min; if (cval->cmask) { cnt = 0; for (c = 0; c < MAX_CHANNELS; c++) { if (!(cval->cmask & (1 << c))) continue; err = get_cur_mix_value(cval, c + 1, cnt, &val); if (err < 0) return cval->mixer->ignore_ctl_error ? 0 : err; val = get_relative_value(cval, val); ucontrol->value.integer.value[cnt] = val; cnt++; } return 0; } else { /* master channel */ err = get_cur_mix_value(cval, 0, 0, &val); if (err < 0) return cval->mixer->ignore_ctl_error ? 0 : err; val = get_relative_value(cval, val); ucontrol->value.integer.value[0] = val; } return 0; } /* put the current value to feature/mixer unit */ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int c, cnt, val, oval, err; int changed = 0; if (cval->cmask) { cnt = 0; for (c = 0; c < MAX_CHANNELS; c++) { if (!(cval->cmask & (1 << c))) continue; err = get_cur_mix_value(cval, c + 1, cnt, &oval); if (err < 0) return cval->mixer->ignore_ctl_error ? 0 : err; val = ucontrol->value.integer.value[cnt]; val = get_abs_value(cval, val); if (oval != val) { set_cur_mix_value(cval, c + 1, cnt, val); changed = 1; } cnt++; } } else { /* master channel */ err = get_cur_mix_value(cval, 0, 0, &oval); if (err < 0) return cval->mixer->ignore_ctl_error ? 
0 : err; val = ucontrol->value.integer.value[0]; val = get_abs_value(cval, val); if (val != oval) { set_cur_mix_value(cval, 0, 0, val); changed = 1; } } return changed; } static struct snd_kcontrol_new usb_feature_unit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later manually */ .info = mixer_ctl_feature_info, .get = mixer_ctl_feature_get, .put = mixer_ctl_feature_put, }; /* the read-only variant */ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later manually */ .info = mixer_ctl_feature_info, .get = mixer_ctl_feature_get, .put = NULL, }; /* This symbol is exported in order to allow the mixer quirks to * hook up to the standard feature unit control mechanism */ struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl; /* * build a feature control */ static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str) { return strlcat(kctl->id.name, str, sizeof(kctl->id.name)); } static void build_feature_ctl(struct mixer_build *state, void *raw_desc, unsigned int ctl_mask, int control, struct usb_audio_term *iterm, int unitid, int readonly_mask) { struct uac_feature_unit_descriptor *desc = raw_desc; unsigned int len = 0; int mapped_name = 0; int nameid = uac_feature_unit_iFeature(desc); struct snd_kcontrol *kctl; struct usb_mixer_elem_info *cval; const struct usbmix_name_map *map; unsigned int range; control++; /* change from zero-based to 1-based value */ if (control == UAC_FU_GRAPHIC_EQUALIZER) { /* FIXME: not supported yet */ return; } map = find_map(state, unitid, control); if (check_ignored_ctl(map)) return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (! cval) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); return; } cval->mixer = state->mixer; cval->id = unitid; cval->control = control; cval->cmask = ctl_mask; cval->val_type = audio_feature_info[control-1].type; if (ctl_mask == 0) { cval->channels = 1; /* master channel */ cval->master_readonly = readonly_mask; } else { int i, c = 0; for (i = 0; i < 16; i++) if (ctl_mask & (1 << i)) c++; cval->channels = c; cval->ch_readonly = readonly_mask; } /* if all channels in the mask are marked read-only, make the control * read-only. set_cur_mix_value() will check the mask again and won't * issue write commands to read-only channels. */ if (cval->channels == readonly_mask) kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval); else kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(cval); return; } kctl->private_free = usb_mixer_elem_free; len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); mapped_name = len != 0; if (! len && nameid) len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); /* get min/max values */ get_min_max_with_quirks(cval, 0, kctl); switch (control) { case UAC_FU_MUTE: case UAC_FU_VOLUME: /* determine the control name. the rule is: * - if a name id is given in descriptor, use it. * - if the connected input can be determined, then use the name * of terminal type. * - if the connected output can be determined, use it. * - otherwise, anonymous name. */ if (! len) { len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 1); if (! len) len = get_term_name(state, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 1); if (! 
len) len = snprintf(kctl->id.name, sizeof(kctl->id.name), "Feature %d", unitid); } /* determine the stream direction: * if the connected output is USB stream, then it's likely a * capture stream. otherwise it should be playback (hopefully :) */ if (! mapped_name && ! (state->oterm.type >> 16)) { if ((state->oterm.type & 0xff00) == 0x0100) { len = append_ctl_name(kctl, " Capture"); } else { len = append_ctl_name(kctl, " Playback"); } } append_ctl_name(kctl, control == UAC_FU_MUTE ? " Switch" : " Volume"); if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); if (cval->dBmin < cval->dBmax || !cval->initialized) { kctl->tlv.c = mixer_vol_tlv; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } } break; default: if (! len) strlcpy(kctl->id.name, audio_feature_info[control-1].name, sizeof(kctl->id.name)); break; } range = (cval->max - cval->min) / cval->res; /* Are there devices with volume range more than 255? I use a bit more * to be sure. 384 is a resolution magic number found on Logitech * devices. It will definitively catch all buggy Logitech devices. */ if (range > 384) { snd_printk(KERN_WARNING "usb_audio: Warning! Unlikely big " "volume range (=%u), cval->res is probably wrong.", range); snd_printk(KERN_WARNING "usb_audio: [%d] FU [%s] ch = %d, " "val = %d/%d/%d", cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); } snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); snd_usb_mixer_add_control(state->mixer, kctl); } /* * parse a feature unit * * most of controls are defined here. */ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void *_ftr) { int channels, i, j; struct usb_audio_term iterm; unsigned int master_bits, first_ch_bits; int err, csize; struct uac_feature_unit_descriptor *hdr = _ftr; __u8 *bmaControls; if (state->mixer->protocol == UAC_VERSION_1) { csize = hdr->bControlSize; if (!csize) { snd_printdd(KERN_ERR "usbaudio: unit %u: " "invalid bControlSize == 0\n", unitid); return -EINVAL; } channels = (hdr->bLength - 7) / csize - 1; bmaControls = hdr->bmaControls; if (hdr->bLength < 7 + csize) { snd_printk(KERN_ERR "usbaudio: unit %u: " "invalid UAC_FEATURE_UNIT descriptor\n", unitid); return -EINVAL; } } else { struct uac2_feature_unit_descriptor *ftr = _ftr; csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; if (hdr->bLength < 6 + csize) { snd_printk(KERN_ERR "usbaudio: unit %u: " "invalid UAC_FEATURE_UNIT descriptor\n", unitid); return -EINVAL; } } /* parse the source unit */ if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0) return err; /* determine the input source type and name */ err = check_input_term(state, hdr->bSourceID, &iterm); if (err < 0) return err; master_bits = snd_usb_combine_bytes(bmaControls, csize); /* master configuration quirks */ switch (state->chip->usb_id) { case USB_ID(0x08bb, 0x2702): snd_printk(KERN_INFO "usbmixer: master volume quirk for PCM2702 chip\n"); /* disable non-functional volume control */ master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME); break; case USB_ID(0x1130, 0xf211): snd_printk(KERN_INFO "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n"); /* disable non-functional volume control */ channels = 0; break; } if (channels > 0) first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize); else first_ch_bits = 0; if (state->mixer->protocol == UAC_VERSION_1) { /* check all control types */ for (i = 0; i < 
10; i++) { unsigned int ch_bits = 0; for (j = 0; j < channels; j++) { unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize); if (mask & (1 << i)) ch_bits |= (1 << j); } /* audio class v1 controls are never read-only */ if (ch_bits & 1) /* the first channel must be set (for ease of programming) */ build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, 0); if (master_bits & (1 << i)) build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 0); } } else { /* UAC_VERSION_2 */ for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) { unsigned int ch_bits = 0; unsigned int ch_read_only = 0; for (j = 0; j < channels; j++) { unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize); if (uac2_control_is_readable(mask, i)) { ch_bits |= (1 << j); if (!uac2_control_is_writeable(mask, i)) ch_read_only |= (1 << j); } } /* NOTE: build_feature_ctl() will mark the control read-only if all channels * are marked read-only in the descriptors. Otherwise, the control will be * reported as writeable, but the driver will not actually issue a write * command for read-only channels */ if (ch_bits & 1) /* the first channel must be set (for ease of programming) */ build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, ch_read_only); if (uac2_control_is_readable(master_bits, i)) build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, !uac2_control_is_writeable(master_bits, i)); } } return 0; } /* * Mixer Unit */ /* * build a mixer unit control * * the callbacks are identical with feature unit. * input channel number (zero based) is given in control field instead. */ static void build_mixer_unit_ctl(struct mixer_build *state, struct uac_mixer_unit_descriptor *desc, int in_pin, int in_ch, int unitid, struct usb_audio_term *iterm) { struct usb_mixer_elem_info *cval; unsigned int num_outs = uac_mixer_unit_bNrChannels(desc); unsigned int i, len; struct snd_kcontrol *kctl; const struct usbmix_name_map *map; map = find_map(state, unitid, 0); if (check_ignored_ctl(map)) return; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (! cval) return; cval->mixer = state->mixer; cval->id = unitid; cval->control = in_ch + 1; /* based on 1 */ cval->val_type = USB_MIXER_S16; for (i = 0; i < num_outs; i++) { if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol), in_ch, i, num_outs)) { cval->cmask |= (1 << i); cval->channels++; } } /* get min/max values */ get_min_max(cval, 0); kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(cval); return; } kctl->private_free = usb_mixer_elem_free; len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (! len) len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0); if (! len) len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1); append_ctl_name(kctl, " Volume"); snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n", cval->id, kctl->id.name, cval->channels, cval->min, cval->max); snd_usb_mixer_add_control(state->mixer, kctl); } /* * parse a mixer unit */ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_mixer_unit_descriptor *desc = raw_desc; struct usb_audio_term iterm; int input_pins, num_ins, num_outs; int pin, ich, err; if (desc->bLength < 11 || ! (input_pins = desc->bNrInPins) || ! (num_outs = uac_mixer_unit_bNrChannels(desc))) { snd_printk(KERN_ERR "invalid MIXER UNIT descriptor %d\n", unitid); return -EINVAL; } /* no bmControls field (e.g. 
Maya44) -> ignore */ if (desc->bLength <= 10 + input_pins) { snd_printdd(KERN_INFO "MU %d has no bmControls field\n", unitid); return 0; } num_ins = 0; ich = 0; for (pin = 0; pin < input_pins; pin++) { err = parse_audio_unit(state, desc->baSourceID[pin]); if (err < 0) return err; err = check_input_term(state, desc->baSourceID[pin], &iterm); if (err < 0) return err; num_ins += iterm.channels; for (; ich < num_ins; ++ich) { int och, ich_has_controls = 0; for (och = 0; och < num_outs; ++och) { if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol), ich, och, num_outs)) { ich_has_controls = 1; break; } } if (ich_has_controls) build_mixer_unit_ctl(state, desc, pin, ich, unitid, &iterm); } } return 0; } /* * Processing Unit / Extension Unit */ /* get callback for processing/extension unit */ static int mixer_ctl_procunit_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int err, val = 0; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0 && cval->mixer->ignore_ctl_error) { ucontrol->value.integer.value[0] = cval->min; return 0; } if (err < 0) return err; val = get_relative_value(cval, val); ucontrol->value.integer.value[0] = val; return 0; } /* put callback for processing/extension unit */ static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, oval = 0, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) { if (cval->mixer->ignore_ctl_error) return 0; return err; } val = ucontrol->value.integer.value[0]; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for processing/extension unit */ static struct snd_kcontrol_new mixer_procunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_feature_info, .get = mixer_ctl_procunit_get, .put = mixer_ctl_procunit_put, }; /* * predefined data for processing units */ struct procunit_value_info { int control; char *suffix; int val_type; int min_value; }; struct procunit_info { int type; char *name; struct procunit_value_info *values; }; static struct procunit_value_info updown_proc_info[] = { { UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static struct procunit_value_info prologic_proc_info[] = { { UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 }, { 0 } }; static struct procunit_value_info threed_enh_proc_info[] = { { UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 }, { 0 } }; static struct procunit_value_info reverb_proc_info[] = { { UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 }, { UAC_REVERB_TIME, "Time", USB_MIXER_U16 }, { UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 }, { 0 } }; static struct procunit_value_info chorus_proc_info[] = { { UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 }, { UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 }, { UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 }, { 0 } }; static struct procunit_value_info dcr_proc_info[] = { { UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN }, { UAC_DCR_RATE, "Ratio", USB_MIXER_U16 }, { UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 }, { UAC_DCR_THRESHOLD, 
"Threshold", USB_MIXER_S16 }, { UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 }, { UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 }, { 0 } }; static struct procunit_info procunits[] = { { UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info }, { UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info }, { UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info }, { UAC_PROCESS_REVERB, "Reverb", reverb_proc_info }, { UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info }, { UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info }, { 0 }, }; /* * predefined data for extension units */ static struct procunit_value_info clock_rate_xu_info[] = { { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 }, { 0 } }; static struct procunit_value_info clock_source_xu_info[] = { { USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_value_info spdif_format_xu_info[] = { { USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_value_info soft_limit_xu_info[] = { { USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_info extunits[] = { { USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info }, { USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info }, { USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info }, { USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info }, { 0 } }; /* * build a processing/extension unit */ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw_desc, struct procunit_info *list, char *name) { struct uac_processing_unit_descriptor *desc = raw_desc; int num_ins = desc->bNrInPins; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; int i, err, nameid, type, len; struct procunit_info *info; struct procunit_value_info *valinfo; const struct usbmix_name_map *map; static struct procunit_value_info default_value_info[] = { { 0x01, "Switch", USB_MIXER_BOOLEAN }, { 0 } }; static struct procunit_info default_info = { 0, NULL, default_value_info }; if (desc->bLength < 13 || desc->bLength < 13 + num_ins || desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { snd_printk(KERN_ERR "invalid %s descriptor (id %d)\n", name, unitid); return -EINVAL; } for (i = 0; i < num_ins; i++) { if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0) return err; } type = le16_to_cpu(desc->wProcessType); for (info = list; info && info->type; info++) if (info->type == type) break; if (! info || ! info->type) info = &default_info; for (valinfo = info->values; valinfo->control; valinfo++) { __u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol); if (! (controls[valinfo->control / 8] & (1 << ((valinfo->control % 8) - 1)))) continue; map = find_map(state, unitid, valinfo->control); if (check_ignored_ctl(map)) continue; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (! 
cval) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); return -ENOMEM; } cval->mixer = state->mixer; cval->id = unitid; cval->control = valinfo->control; cval->val_type = valinfo->val_type; cval->channels = 1; /* get min/max values */ if (type == UAC_PROCESS_UP_DOWNMIX && cval->control == UAC_UD_MODE_SELECT) { __u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol); /* FIXME: hard-coded */ cval->min = 1; cval->max = control_spec[0]; cval->res = 1; cval->initialized = 1; } else { if (type == USB_XU_CLOCK_RATE) { /* E-Mu USB 0404/0202/TrackerPre/0204 * samplerate control quirk */ cval->min = 0; cval->max = 5; cval->res = 1; cval->initialized = 1; } else get_min_max(cval, valinfo->min_value); } kctl = snd_ctl_new1(&mixer_procunit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(cval); return -ENOMEM; } kctl->private_free = usb_mixer_elem_free; if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) /* nothing */ ; else if (info->name) strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name)); else { nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol); len = 0; if (nameid) len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); if (! len) strlcpy(kctl->id.name, name, sizeof(kctl->id.name)); } append_ctl_name(kctl, " "); append_ctl_name(kctl, valinfo->suffix); snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n", cval->id, kctl->id.name, cval->channels, cval->min, cval->max); if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0) return err; } return 0; } static int parse_audio_processing_unit(struct mixer_build *state, int unitid, void *raw_desc) { return build_audio_procunit(state, unitid, raw_desc, procunits, "Processing Unit"); } static int parse_audio_extension_unit(struct mixer_build *state, int unitid, void *raw_desc) { /* Note that we parse extension units with processing unit descriptors. 
* That's ok as the layout is the same */ return build_audio_procunit(state, unitid, raw_desc, extunits, "Extension Unit"); } /* * Selector Unit */ /* info callback for selector unit * use an enumerator type for routing */ static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct usb_mixer_elem_info *cval = kcontrol->private_data; const char **itemlist = (const char **)kcontrol->private_value; if (snd_BUG_ON(!itemlist)) return -EINVAL; return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist); } /* get callback for selector unit */ static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val = 0, err; err = get_cur_ctl_value(cval, cval->control << 8, &val); if (err < 0) { if (cval->mixer->ignore_ctl_error) { ucontrol->value.enumerated.item[0] = 0; return 0; } return err; } val = get_relative_value(cval, val); ucontrol->value.enumerated.item[0] = val; return 0; } /* put callback for selector unit */ static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct usb_mixer_elem_info *cval = kcontrol->private_data; int val, oval = 0, err; err = get_cur_ctl_value(cval, cval->control << 8, &oval); if (err < 0) { if (cval->mixer->ignore_ctl_error) return 0; return err; } val = ucontrol->value.enumerated.item[0]; val = get_abs_value(cval, val); if (val != oval) { set_cur_ctl_value(cval, cval->control << 8, val); return 1; } return 0; } /* alsa control interface for selector unit */ static struct snd_kcontrol_new mixer_selectunit_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "", /* will be filled later */ .info = mixer_ctl_selector_info, .get = mixer_ctl_selector_get, .put = mixer_ctl_selector_put, }; /* private free callback. * free both private_data and private_value */ static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl) { int i, num_ins = 0; if (kctl->private_data) { struct usb_mixer_elem_info *cval = kctl->private_data; num_ins = cval->max; kfree(cval); kctl->private_data = NULL; } if (kctl->private_value) { char **itemlist = (char **)kctl->private_value; for (i = 0; i < num_ins; i++) kfree(itemlist[i]); kfree(itemlist); kctl->private_value = 0; } } /* * parse a selector unit */ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void *raw_desc) { struct uac_selector_unit_descriptor *desc = raw_desc; unsigned int i, nameid, len; int err; struct usb_mixer_elem_info *cval; struct snd_kcontrol *kctl; const struct usbmix_name_map *map; char **namelist; if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) { snd_printk(KERN_ERR "invalid SELECTOR UNIT descriptor %d\n", unitid); return -EINVAL; } for (i = 0; i < desc->bNrInPins; i++) { if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0) return err; } if (desc->bNrInPins == 1) /* only one ? nonsense! */ return 0; map = find_map(state, unitid, 0); if (check_ignored_ctl(map)) return 0; cval = kzalloc(sizeof(*cval), GFP_KERNEL); if (! cval) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); return -ENOMEM; } cval->mixer = state->mixer; cval->id = unitid; cval->val_type = USB_MIXER_U8; cval->channels = 1; cval->min = 1; cval->max = desc->bNrInPins; cval->res = 1; cval->initialized = 1; if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) cval->control = UAC2_CX_CLOCK_SELECTOR; else cval->control = 0; namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL); if (! 
namelist) { snd_printk(KERN_ERR "cannot malloc\n"); kfree(cval); return -ENOMEM; } #define MAX_ITEM_NAME_LEN 64 for (i = 0; i < desc->bNrInPins; i++) { struct usb_audio_term iterm; len = 0; namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL); if (! namelist[i]) { snd_printk(KERN_ERR "cannot malloc\n"); while (i--) kfree(namelist[i]); kfree(namelist); kfree(cval); return -ENOMEM; } len = check_mapped_selector_name(state, unitid, i, namelist[i], MAX_ITEM_NAME_LEN); if (! len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0) len = get_term_name(state, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0); if (! len) sprintf(namelist[i], "Input %d", i); } kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval); if (! kctl) { snd_printk(KERN_ERR "cannot malloc kcontrol\n"); kfree(namelist); kfree(cval); return -ENOMEM; } kctl->private_value = (unsigned long)namelist; kctl->private_free = usb_mixer_selector_elem_free; nameid = uac_selector_unit_iSelector(desc); len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); if (len) ; else if (nameid) snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name)); else { len = get_term_name(state, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 0); if (! len) strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) append_ctl_name(kctl, " Clock Source"); else if ((state->oterm.type & 0xff00) == 0x0100) append_ctl_name(kctl, " Capture Source"); else append_ctl_name(kctl, " Playback Source"); } snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n", cval->id, kctl->id.name, desc->bNrInPins); if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0) return err; return 0; } /* * parse an audio unit recursively */ static int parse_audio_unit(struct mixer_build *state, int unitid) { unsigned char *p1; if (test_and_set_bit(unitid, state->unitbitmap)) return 0; /* the unit already visited */ p1 = find_audio_control_unit(state, unitid); if (!p1) { snd_printk(KERN_ERR "usbaudio: unit %d not found!\n", unitid); return -EINVAL; } switch (p1[2]) { case UAC_INPUT_TERMINAL: case UAC2_CLOCK_SOURCE: return 0; /* NOP */ case UAC_MIXER_UNIT: return parse_audio_mixer_unit(state, unitid, p1); case UAC_SELECTOR_UNIT: case UAC2_CLOCK_SELECTOR: return parse_audio_selector_unit(state, unitid, p1); case UAC_FEATURE_UNIT: return parse_audio_feature_unit(state, unitid, p1); case UAC1_PROCESSING_UNIT: /* UAC2_EFFECT_UNIT has the same value */ if (state->mixer->protocol == UAC_VERSION_1) return parse_audio_processing_unit(state, unitid, p1); else return 0; /* FIXME - effect units not implemented yet */ case UAC1_EXTENSION_UNIT: /* UAC2_PROCESSING_UNIT_V2 has the same value */ if (state->mixer->protocol == UAC_VERSION_1) return parse_audio_extension_unit(state, unitid, p1); else /* UAC_VERSION_2 */ return parse_audio_processing_unit(state, unitid, p1); case UAC2_EXTENSION_UNIT_V2: return parse_audio_extension_unit(state, unitid, p1); default: snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); return -EINVAL; } } static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) { kfree(mixer->id_elems); if (mixer->urb) { kfree(mixer->urb->transfer_buffer); usb_free_urb(mixer->urb); } usb_free_urb(mixer->rc_urb); kfree(mixer->rc_setup_packet); kfree(mixer); } static int snd_usb_mixer_dev_free(struct snd_device *device) { struct usb_mixer_interface *mixer = device->device_data; snd_usb_mixer_free(mixer); return 0; } /* * create mixer controls * * walk through all 
UAC_OUTPUT_TERMINAL descriptors to search for mixers */ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) { struct mixer_build state; int err; const struct usbmix_ctl_map *map; void *p; memset(&state, 0, sizeof(state)); state.chip = mixer->chip; state.mixer = mixer; state.buffer = mixer->hostif->extra; state.buflen = mixer->hostif->extralen; /* check the mapping table */ for (map = usbmix_ctl_maps; map->id; map++) { if (map->id == state.chip->usb_id) { state.map = map->map; state.selector_map = map->selector_map; mixer->ignore_ctl_error = map->ignore_ctl_error; break; } } p = NULL; while ((p = snd_usb_find_csint_desc(mixer->hostif->extra, mixer->hostif->extralen, p, UAC_OUTPUT_TERMINAL)) != NULL) { if (mixer->protocol == UAC_VERSION_1) { struct uac1_output_terminal_descriptor *desc = p; if (desc->bLength < sizeof(*desc)) continue; /* invalid descriptor? */ set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */ state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; } else { /* UAC_VERSION_2 */ struct uac2_output_terminal_descriptor *desc = p; if (desc->bLength < sizeof(*desc)) continue; /* invalid descriptor? */ set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */ state.oterm.id = desc->bTerminalID; state.oterm.type = le16_to_cpu(desc->wTerminalType); state.oterm.name = desc->iTerminal; err = parse_audio_unit(&state, desc->bSourceID); if (err < 0 && err != -EINVAL) return err; /* for UAC2, use the same approach to also add the clock selectors */ err = parse_audio_unit(&state, desc->bCSourceID); if (err < 0 && err != -EINVAL) return err; } } return 0; } void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_info *info; for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, info->elem_id); } static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, int unitid, struct usb_mixer_elem_info *cval) { static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16"}; snd_iprintf(buffer, " Unit: %i\n", unitid); if (cval->elem_id) snd_iprintf(buffer, " Control: name=\"%s\", index=%i\n", cval->elem_id->name, cval->elem_id->index); snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " "channels=%i, type=\"%s\"\n", cval->id, cval->control, cval->cmask, cval->channels, val_types[cval->val_type]); snd_iprintf(buffer, " Volume: min=%i, max=%i, dBmin=%i, dBmax=%i\n", cval->min, cval->max, cval->dBmin, cval->dBmax); } static void snd_usb_mixer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_usb_audio *chip = entry->private_data; struct usb_mixer_interface *mixer; struct usb_mixer_elem_info *cval; int unitid; list_for_each_entry(mixer, &chip->mixer_list, list) { snd_iprintf(buffer, "USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n", chip->usb_id, snd_usb_ctrl_intf(chip), mixer->ignore_ctl_error); snd_iprintf(buffer, "Card: %s\n", chip->card->longname); for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) { for (cval = mixer->id_elems[unitid]; cval; cval = cval->next_id_elem) snd_usb_mixer_dump_cval(buffer, unitid, cval); } } } static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, int attribute, int value, int index) { struct usb_mixer_elem_info *info; __u8 unitid = (index >> 8) 
& 0xff; __u8 control = (value >> 8) & 0xff; __u8 channel = value & 0xff; if (channel >= MAX_CHANNELS) { snd_printk(KERN_DEBUG "%s(): bogus channel number %d\n", __func__, channel); return; } for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) { if (info->control != control) continue; switch (attribute) { case UAC2_CS_CUR: /* invalidate cache, so the value is read from the device */ if (channel) info->cached &= ~(1 << channel); else /* master channel */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, info->elem_id); break; case UAC2_CS_RANGE: /* TODO */ break; case UAC2_CS_MEM: /* TODO */ break; default: snd_printk(KERN_DEBUG "unknown attribute %d in interrupt\n", attribute); break; } /* switch */ } } static void snd_usb_mixer_interrupt(struct urb *urb) { struct usb_mixer_interface *mixer = urb->context; int len = urb->actual_length; int ustatus = urb->status; if (ustatus != 0) goto requeue; if (mixer->protocol == UAC_VERSION_1) { struct uac1_status_word *status; for (status = urb->transfer_buffer; len >= sizeof(*status); len -= sizeof(*status), status++) { snd_printd(KERN_DEBUG "status interrupt: %02x %02x\n", status->bStatusType, status->bOriginator); /* ignore any notifications not from the control interface */ if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) != UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF) continue; if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED) snd_usb_mixer_rc_memory_change(mixer, status->bOriginator); else snd_usb_mixer_notify_id(mixer, status->bOriginator); } } else { /* UAC_VERSION_2 */ struct uac2_interrupt_data_msg *msg; for (msg = urb->transfer_buffer; len >= sizeof(*msg); len -= sizeof(*msg), msg++) { /* drop vendor specific and endpoint requests */ if ((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) || (msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP)) continue; snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute, le16_to_cpu(msg->wValue), le16_to_cpu(msg->wIndex)); } } requeue: if (ustatus != -ENOENT && ustatus != -ECONNRESET && ustatus != -ESHUTDOWN) { urb->dev = mixer->chip->dev; usb_submit_urb(urb, GFP_ATOMIC); } } /* stop any bus activity of a mixer */ void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer) { usb_kill_urb(mixer->urb); usb_kill_urb(mixer->rc_urb); } int snd_usb_mixer_activate(struct usb_mixer_interface *mixer) { int err; if (mixer->urb) { err = usb_submit_urb(mixer->urb, GFP_NOIO); if (err < 0) return err; } return 0; } /* create the handler for the optional status interrupt endpoint */ static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer) { struct usb_endpoint_descriptor *ep; void *transfer_buffer; int buffer_length; unsigned int epnum; /* we need one interrupt input endpoint */ if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1) return 0; ep = get_endpoint(mixer->hostif, 0); if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep)) return 0; epnum = usb_endpoint_num(ep); buffer_length = le16_to_cpu(ep->wMaxPacketSize); transfer_buffer = kmalloc(buffer_length, GFP_KERNEL); if (!transfer_buffer) return -ENOMEM; mixer->urb = usb_alloc_urb(0, GFP_KERNEL); if (!mixer->urb) { kfree(transfer_buffer); return -ENOMEM; } usb_fill_int_urb(mixer->urb, mixer->chip->dev, usb_rcvintpipe(mixer->chip->dev, epnum), transfer_buffer, buffer_length, snd_usb_mixer_interrupt, mixer, ep->bInterval); usb_submit_urb(mixer->urb, GFP_KERNEL); return 0; } int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, int ignore_error) { static struct snd_device_ops dev_ops = { 
.dev_free = snd_usb_mixer_dev_free }; struct usb_mixer_interface *mixer; struct snd_info_entry *entry; int err; strcpy(chip->card->mixername, "USB Mixer"); mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); if (!mixer) return -ENOMEM; mixer->chip = chip; mixer->ignore_ctl_error = ignore_error; mixer->id_elems = kcalloc(MAX_ID_ELEMS, sizeof(*mixer->id_elems), GFP_KERNEL); if (!mixer->id_elems) { kfree(mixer); return -ENOMEM; } mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) { case UAC_VERSION_1: default: mixer->protocol = UAC_VERSION_1; break; case UAC_VERSION_2: mixer->protocol = UAC_VERSION_2; break; } if ((err = snd_usb_mixer_controls(mixer)) < 0 || (err = snd_usb_mixer_status_create(mixer)) < 0) goto _error; snd_usb_mixer_apply_create_quirk(mixer); err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops); if (err < 0) goto _error; if (list_empty(&chip->mixer_list) && !snd_card_proc_new(chip->card, "usbmixer", &entry)) snd_info_set_text_ops(entry, chip, snd_usb_mixer_proc_read); list_add(&mixer->list, &chip->mixer_list); return 0; _error: snd_usb_mixer_free(mixer); return err; } void snd_usb_mixer_disconnect(struct list_head *p) { struct usb_mixer_interface *mixer; mixer = list_entry(p, struct usb_mixer_interface, list); usb_kill_urb(mixer->urb); usb_kill_urb(mixer->rc_urb); }
gpl-2.0
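The feature-unit parser in the USB mixer source above collapses the per-channel bmaControls entries of a UAC descriptor into one channel bitmask per control index before building ALSA controls. A minimal standalone sketch of that aggregation, assuming an invented three-entry descriptor (master entry plus two logical channels, csize == 1) and a combine_bytes() stand-in for snd_usb_combine_bytes():

#include <stdio.h>
#include <stdint.h>

/* Stand-in for snd_usb_combine_bytes(): little-endian combine of csize bytes. */
static unsigned int combine_bytes(const uint8_t *bytes, int csize)
{
	unsigned int v = 0;
	for (int i = 0; i < csize; i++)
		v |= (unsigned int)bytes[i] << (i * 8);
	return v;
}

int main(void)
{
	/* Invented bmaControls: entry 0 is the master channel, entries 1..2 are
	 * logical channels; bit 0 = mute, bit 1 = volume (UAC v1 layout). */
	const uint8_t bmaControls[] = { 0x01, 0x02, 0x02 };
	const int csize = 1, channels = 2;

	for (int ctl = 0; ctl < 8; ctl++) {
		unsigned int ch_bits = 0;
		for (int j = 0; j < channels; j++) {
			unsigned int mask = combine_bytes(bmaControls + csize * (j + 1), csize);
			if (mask & (1u << ctl))
				ch_bits |= (1u << j);	/* control ctl exists on channel j */
		}
		if (ch_bits)
			printf("control %d present, channel mask 0x%x\n", ctl, ch_bits);
		if (combine_bytes(bmaControls, csize) & (1u << ctl))
			printf("control %d present on master channel\n", ctl);
	}
	return 0;
}

With the invented masks, mute (bit 0) is reported only on the master channel and volume (bit 1) on both logical channels (mask 0x3), mirroring how the parser above calls build_feature_ctl() once per surviving bitmask.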
xxha/glibc
iconvdata/iso-ir-209.c
8
1094
/* Conversion from and to ISO-IR-209. Copyright (C) 2001-2013 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <drepper@redhat.com>, 2001. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <stdint.h> /* Get the conversion table. */ #define TABLES <iso-ir-209.h> #define CHARSET_NAME "ISO-IR-209//" #define HAS_HOLES 1 /* Not all 256 characters are defined. */ #include <8bit-gap.c>
gpl-2.0
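iso-ir-209.c above is a thin wrapper: it names the generated table header, sets HAS_HOLES because some byte values have no assigned character, and pulls in the shared 8bit-gap.c conversion engine. A rough userspace sketch of the byte-to-UCS4 direction of such a holey 8-bit table follows; the mappings are invented and the real glibc table layout and conversion loop differ:

#include <stdio.h>
#include <stdint.h>

/* Invented to-UCS4 table fragment; 0 marks a hole (undefined byte), and
 * only byte 0x00 legitimately maps to U+0000. */
static const uint32_t to_ucs4[256] = {
	[0x41] = 0x0041,	/* 'A' */
	[0xA1] = 0x0104,	/* invented mapping */
	/* 0xA2 intentionally left 0: a hole */
	[0xA3] = 0x0105,	/* invented mapping */
};

int main(void)
{
	const unsigned test[] = { 0x00, 0x41, 0xA1, 0xA2, 0xA3 };
	for (unsigned i = 0; i < sizeof(test) / sizeof(test[0]); i++) {
		uint32_t ch = to_ucs4[test[i]];
		if (ch == 0 && test[i] != 0)
			printf("byte 0x%02X: hole, conversion would fail\n", test[i]);
		else
			printf("byte 0x%02X -> U+%04X\n", test[i], (unsigned)ch);
	}
	return 0;
}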
angelblue05/xbmc
xbmc/cdrip/EncoderFFmpeg.cpp
8
11035
/* * Copyright (C) 2005-2013 Team XBMC * http://kodi.tv * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include "stdint.h" #include "EncoderFFmpeg.h" #include "ServiceBroker.h" #include "utils/log.h" #include "settings/Settings.h" #include "utils/SystemInfo.h" #include "utils/URIUtils.h" #include "addons/AddonManager.h" /* AV_PKT_FLAG_KEY was named PKT_FLAG_KEY in older versions of libavcodec */ #ifndef AV_PKT_FLAG_KEY #define AV_PKT_FLAG_KEY PKT_FLAG_KEY #endif using namespace ADDON; CEncoderFFmpeg::CEncoderFFmpeg(): m_Format (NULL), m_CodecCtx (NULL), m_SwrCtx (NULL), m_Stream (NULL), m_Buffer (NULL), m_BufferSize(0), m_BufferFrame(NULL), m_ResampledBuffer(NULL), m_ResampledBufferSize(0), m_ResampledFrame(NULL), m_NeedConversion(false) { memset(&m_callbacks, 0, sizeof(m_callbacks)); } bool CEncoderFFmpeg::Init(AddonToKodiFuncTable_AudioEncoder& callbacks) { if (!callbacks.kodiInstance || !callbacks.write || !callbacks.seek) return false; m_callbacks = callbacks; std::string filename = URIUtils::GetFileName(m_strFile); if(avformat_alloc_output_context2(&m_Format,NULL,NULL,filename.c_str())) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Unable to guess the output format for the file %s", filename.c_str()); return false; } AVCodec *codec; codec = avcodec_find_encoder(m_Format->oformat->audio_codec); if (!codec) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Unable to find a suitable FFmpeg encoder"); return false; } m_Format->pb = avio_alloc_context(m_BCBuffer, sizeof(m_BCBuffer), AVIO_FLAG_WRITE, this, NULL, avio_write_callback, avio_seek_callback); if (!m_Format->pb) { av_freep(&m_Format); CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to allocate ByteIOContext"); return false; } AddonPtr addon; CServiceBroker::GetAddonMgr().GetAddon(CServiceBroker::GetSettings().GetString(CSettings::SETTING_AUDIOCDS_ENCODER), addon); if (addon) { m_Format->bit_rate = (128+32*strtol(addon->GetSetting("bitrate").c_str(), NULL, 10))*1000; } /* add a stream to it */ m_Stream = avformat_new_stream(m_Format, codec); if (!m_Stream) { av_freep(&m_Format->pb); av_freep(&m_Format); CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to allocate AVStream context"); return false; } /* set the stream's parameters */ m_CodecCtx = m_Stream->codec; m_CodecCtx->codec_id = codec->id; m_CodecCtx->codec_type = AVMEDIA_TYPE_AUDIO; m_CodecCtx->bit_rate = m_Format->bit_rate; m_CodecCtx->sample_rate = m_iInSampleRate; m_CodecCtx->channels = m_iInChannels; m_CodecCtx->channel_layout = av_get_default_channel_layout(m_iInChannels); m_CodecCtx->time_base.num = 1; m_CodecCtx->time_base.den = m_iInSampleRate; /* Allow experimental encoders (like FFmpeg builtin AAC encoder) */ m_CodecCtx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; if(m_Format->oformat->flags & AVFMT_GLOBALHEADER) { m_CodecCtx->flags |= 
AV_CODEC_FLAG_GLOBAL_HEADER; m_Format->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; } switch(m_iInBitsPerSample) { case 8: m_InFormat = AV_SAMPLE_FMT_U8 ; break; case 16: m_InFormat = AV_SAMPLE_FMT_S16; break; case 32: m_InFormat = AV_SAMPLE_FMT_S32; break; default: av_freep(&m_Stream); av_freep(&m_Format->pb); av_freep(&m_Format); return false; } m_OutFormat = codec->sample_fmts[0]; m_CodecCtx->sample_fmt = m_OutFormat; m_NeedConversion = (m_OutFormat != m_InFormat); if (m_OutFormat <= AV_SAMPLE_FMT_NONE || avcodec_open2(m_CodecCtx, codec, NULL)) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to open the codec %s", codec->long_name ? codec->long_name : codec->name); av_freep(&m_Stream); av_freep(&m_Format->pb); av_freep(&m_Format); return false; } /* calculate how many bytes we need per frame */ m_NeededFrames = m_CodecCtx->frame_size; m_NeededBytes = av_samples_get_buffer_size(NULL, m_iInChannels, m_NeededFrames, m_InFormat, 0); m_Buffer = (uint8_t*)av_malloc(m_NeededBytes); m_BufferSize = 0; m_BufferFrame = av_frame_alloc(); if(!m_BufferFrame || !m_Buffer) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to allocate necessary buffers"); av_frame_free(&m_BufferFrame); av_freep(&m_Buffer); av_freep(&m_Stream); av_freep(&m_Format->pb); av_freep(&m_Format); return false; } m_BufferFrame->nb_samples = m_CodecCtx->frame_size; m_BufferFrame->format = m_InFormat; m_BufferFrame->channel_layout = m_CodecCtx->channel_layout; avcodec_fill_audio_frame(m_BufferFrame, m_iInChannels, m_InFormat, m_Buffer, m_NeededBytes, 0); if(m_NeedConversion) { m_SwrCtx = swr_alloc_set_opts(NULL, m_CodecCtx->channel_layout, m_OutFormat, m_CodecCtx->sample_rate, m_CodecCtx->channel_layout, m_InFormat, m_CodecCtx->sample_rate, 0, NULL); if(!m_SwrCtx || swr_init(m_SwrCtx) < 0) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to initialize the resampler"); av_frame_free(&m_BufferFrame); av_freep(&m_Buffer); av_freep(&m_Stream); av_freep(&m_Format->pb); av_freep(&m_Format); return false; } m_ResampledBufferSize = av_samples_get_buffer_size(NULL, m_iInChannels, m_NeededFrames, m_OutFormat, 0); m_ResampledBuffer = (uint8_t*)av_malloc(m_ResampledBufferSize); m_ResampledFrame = av_frame_alloc(); if(!m_ResampledBuffer || !m_ResampledFrame) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to allocate a frame for resampling"); av_frame_free(&m_ResampledFrame); av_freep(&m_ResampledBuffer); swr_free(&m_SwrCtx); av_frame_free(&m_BufferFrame); av_freep(&m_Buffer); av_freep(&m_Stream); av_freep(&m_Format->pb); av_freep(&m_Format); return false; } m_ResampledFrame->nb_samples = m_NeededFrames; m_ResampledFrame->format = m_OutFormat; m_ResampledFrame->channel_layout = m_CodecCtx->channel_layout; avcodec_fill_audio_frame(m_ResampledFrame, m_iInChannels, m_OutFormat, m_ResampledBuffer, m_ResampledBufferSize, 0); } /* set the tags */ SetTag("album" , m_strAlbum); SetTag("album_artist", m_strArtist); SetTag("genre" , m_strGenre); SetTag("title" , m_strTitle); SetTag("track" , m_strTrack); SetTag("encoder" , CSysInfo::GetAppName() + " FFmpeg Encoder"); /* write the header */ if (avformat_write_header(m_Format, NULL) != 0) { CLog::Log(LOGERROR, "CEncoderFFmpeg::Init - Failed to write the header"); av_frame_free(&m_ResampledFrame); av_freep(&m_ResampledBuffer); swr_free(&m_SwrCtx); av_frame_free(&m_BufferFrame); av_freep(&m_Buffer); av_freep(&m_Stream); av_freep(&m_Format->pb); av_freep(&m_Format); return false; } CLog::Log(LOGDEBUG, "CEncoderFFmpeg::Init - Successfully initialized with muxer %s and codec %s", 
m_Format->oformat->long_name? m_Format->oformat->long_name : m_Format->oformat->name, codec->long_name? codec->long_name : codec->name); return true; } void CEncoderFFmpeg::SetTag(const std::string &tag, const std::string &value) { av_dict_set(&m_Format->metadata, tag.c_str(), value.c_str(), 0); } int CEncoderFFmpeg::avio_write_callback(void *opaque, uint8_t *buf, int buf_size) { CEncoderFFmpeg *enc = static_cast<CEncoderFFmpeg*>(opaque); if(enc->m_callbacks.write(enc->m_callbacks.kodiInstance, buf, buf_size) != buf_size) { CLog::Log(LOGERROR, "Error writing FFmpeg buffer to file"); return -1; } return buf_size; } int64_t CEncoderFFmpeg::avio_seek_callback(void *opaque, int64_t offset, int whence) { CEncoderFFmpeg *enc = static_cast<CEncoderFFmpeg*>(opaque); return enc->m_callbacks.seek(enc->m_callbacks.kodiInstance, offset, whence); } int CEncoderFFmpeg::Encode(int nNumBytesRead, uint8_t* pbtStream) { while(nNumBytesRead > 0) { unsigned int space = m_NeededBytes - m_BufferSize; unsigned int copy = (unsigned int)nNumBytesRead > space ? space : nNumBytesRead; memcpy(&m_Buffer[m_BufferSize], pbtStream, copy); m_BufferSize += copy; pbtStream += copy; nNumBytesRead -= copy; /* only write full packets */ if (m_BufferSize == m_NeededBytes) { if (!WriteFrame()) return 0; } } return 1; } bool CEncoderFFmpeg::WriteFrame() { int encoded, got_output; AVFrame* frame; av_init_packet(&m_Pkt); m_Pkt.data = NULL; m_Pkt.size = 0; if(m_NeedConversion) { if (swr_convert(m_SwrCtx, m_ResampledFrame->extended_data, m_NeededFrames, (const uint8_t**)m_BufferFrame->extended_data, m_NeededFrames) < 0) { CLog::Log(LOGERROR, "CEncoderFFmpeg::WriteFrame - Error resampling audio"); return false; } frame = m_ResampledFrame; } else frame = m_BufferFrame; encoded = avcodec_encode_audio2(m_CodecCtx, &m_Pkt, frame, &got_output); m_BufferSize = 0; if (encoded < 0) { CLog::Log(LOGERROR, "CEncoderFFmpeg::WriteFrame - Error encoding audio: %i", encoded); return false; } if (got_output) { if (av_write_frame(m_Format, &m_Pkt) < 0) { CLog::Log(LOGERROR, "CEncoderFFMmpeg::WriteFrame - Failed to write the frame data"); return false; } } av_packet_unref(&m_Pkt); return true; } bool CEncoderFFmpeg::Close() { if (m_Format) { /* if there is anything still in the buffer */ if (m_BufferSize > 0) { /* zero the unused space so we dont encode random junk */ memset(&m_Buffer[m_BufferSize], 0, m_NeededBytes - m_BufferSize); /* write any remaining data */ WriteFrame(); } /* Flush if needed */ av_freep(&m_Buffer); av_frame_free(&m_BufferFrame); swr_free(&m_SwrCtx); av_frame_free(&m_ResampledFrame); av_freep(&m_ResampledBuffer); m_NeedConversion = false; WriteFrame(); /* write the trailer */ av_write_trailer(m_Format); /* cleanup */ avcodec_close(m_CodecCtx); av_freep(&m_Stream ); av_freep(&m_Format->pb); av_freep(&m_Format ); } m_BufferSize = 0; return true; }
gpl-2.0
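CEncoderFFmpeg::Encode() above copes with arbitrarily sized PCM reads by buffering bytes until exactly one codec frame's worth (m_NeededBytes) has accumulated, and only then encoding. A minimal C sketch of just that accumulation loop, with an invented frame size and write_frame() as a hypothetical stand-in for WriteFrame():

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NEEDED_BYTES 16		/* stand-in for m_NeededBytes (one codec frame) */

static uint8_t buffer[NEEDED_BYTES];	/* stand-in for m_Buffer */
static unsigned buffer_size;		/* stand-in for m_BufferSize */

/* Hypothetical stand-in for CEncoderFFmpeg::WriteFrame(). */
static void write_frame(void)
{
	printf("encoding one %u-byte frame\n", buffer_size);
	buffer_size = 0;
}

static void encode(const uint8_t *stream, int nbytes)
{
	while (nbytes > 0) {
		unsigned space = NEEDED_BYTES - buffer_size;
		unsigned copy = (unsigned)nbytes > space ? space : (unsigned)nbytes;

		memcpy(&buffer[buffer_size], stream, copy);
		buffer_size += copy;
		stream += copy;
		nbytes -= copy;

		if (buffer_size == NEEDED_BYTES)	/* only submit full frames */
			write_frame();
	}
}

int main(void)
{
	uint8_t pcm[40] = { 0 };
	encode(pcm, sizeof(pcm));	/* 40 bytes -> two full frames, 8 left over */
	printf("%u bytes still buffered\n", buffer_size);
	return 0;
}

Close() depends on the same invariant: it zero-pads the tail of the buffer so the final partial frame can be submitted through the identical full-frame path.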
gbiyer/Sony-Aosp-Kernel
arch/arm64/kernel/psci.c
8
12616
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Copyright (C) 2013 ARM Limited * * Author: Will Deacon <will.deacon@arm.com> */ #define pr_fmt(fmt) "psci: " fmt #include <linux/init.h> #include <linux/of.h> #include <linux/smp.h> #include <linux/reboot.h> #include <linux/pm.h> #include <linux/delay.h> #include <uapi/linux/psci.h> #include <asm/compiler.h> #include <asm/cpu_ops.h> #include <asm/errno.h> #include <asm/psci.h> #include <asm/smp_plat.h> #include <asm/system_misc.h> #include <asm/suspend.h> #define PSCI_POWER_STATE_TYPE_STANDBY 0 #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 #define PSCI_POWER_STATE_BIT BIT(30) struct psci_power_state { u16 id; u8 type; u8 affinity_level; }; struct psci_operations { int (*cpu_suspend)(unsigned long state_id, unsigned long entry_point); int (*cpu_off)(struct psci_power_state state); int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); int (*migrate)(unsigned long cpuid); int (*affinity_info)(unsigned long target_affinity, unsigned long lowest_affinity_level); int (*migrate_info_type)(void); }; static struct psci_operations psci_ops; static int (*invoke_psci_fn)(u64, u64, u64, u64); typedef int (*psci_initcall_t)(const struct device_node *); enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, PSCI_FN_CPU_OFF, PSCI_FN_MIGRATE, PSCI_FN_AFFINITY_INFO, PSCI_FN_MIGRATE_INFO_TYPE, PSCI_FN_MAX, }; static u32 psci_function_id[PSCI_FN_MAX]; static int psci_to_linux_errno(int errno) { switch (errno) { case PSCI_RET_SUCCESS: return 0; case PSCI_RET_NOT_SUPPORTED: return -EOPNOTSUPP; case PSCI_RET_INVALID_PARAMS: return -EINVAL; case PSCI_RET_DENIED: return -EPERM; }; return -EINVAL; } static u32 psci_power_state_pack(struct psci_power_state state) { return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT) & PSCI_0_2_POWER_STATE_ID_MASK) | ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) & PSCI_0_2_POWER_STATE_TYPE_MASK) | ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) & PSCI_0_2_POWER_STATE_AFFL_MASK); } /* * The following two functions are invoked via the invoke_psci_fn pointer * and will not be inlined, allowing us to piggyback on the AAPCS. 
*/ static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) { asm volatile( __asmeq("%0", "x0") __asmeq("%1", "x1") __asmeq("%2", "x2") __asmeq("%3", "x3") "hvc #0\n" : "+r" (function_id) : "r" (arg0), "r" (arg1), "r" (arg2)); return function_id; } static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) { asm volatile( __asmeq("%0", "x0") __asmeq("%1", "x1") __asmeq("%2", "x2") __asmeq("%3", "x3") "smc #0\n" : "+r" (function_id) : "r" (arg0), "r" (arg1), "r" (arg2)); return function_id; } static int psci_get_version(void) { int err; err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); return err; } static int psci_cpu_suspend(unsigned long state_id, unsigned long entry_point) { int err; u32 fn; fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; err = invoke_psci_fn(fn, state_id, entry_point, 0); return psci_to_linux_errno(err); } static int psci_cpu_off(struct psci_power_state state) { int err; u32 fn, power_state; fn = psci_function_id[PSCI_FN_CPU_OFF]; power_state = psci_power_state_pack(state); err = invoke_psci_fn(fn, power_state, 0, 0); return psci_to_linux_errno(err); } static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) { int err; u32 fn; fn = psci_function_id[PSCI_FN_CPU_ON]; err = invoke_psci_fn(fn, cpuid, entry_point, 0); return psci_to_linux_errno(err); } static int psci_migrate(unsigned long cpuid) { int err; u32 fn; fn = psci_function_id[PSCI_FN_MIGRATE]; err = invoke_psci_fn(fn, cpuid, 0, 0); return psci_to_linux_errno(err); } static int psci_affinity_info(unsigned long target_affinity, unsigned long lowest_affinity_level) { int err; u32 fn; fn = psci_function_id[PSCI_FN_AFFINITY_INFO]; err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0); return err; } static int psci_migrate_info_type(void) { int err; u32 fn; fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; err = invoke_psci_fn(fn, 0, 0, 0); return err; } static int get_set_conduit_method(struct device_node *np) { const char *method; pr_info("probing for conduit method from DT.\n"); if (of_property_read_string(np, "method", &method)) { pr_warn("missing \"method\" property\n"); return -ENXIO; } if (!strcmp("hvc", method)) { invoke_psci_fn = __invoke_psci_fn_hvc; } else if (!strcmp("smc", method)) { invoke_psci_fn = __invoke_psci_fn_smc; } else { pr_warn("invalid \"method\" property: %s\n", method); return -EINVAL; } return 0; } static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) { invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); } static void psci_sys_poweroff(void) { invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); } /* * PSCI Function IDs for v0.2+ are well defined so use * standard values. */ static int psci_1_0_init(struct device_node *np) { int err, ver; err = get_set_conduit_method(np); if (err) goto out_put_node; ver = psci_get_version(); if (ver == PSCI_RET_NOT_SUPPORTED) { /* PSCI v1.0 mandates implementation of PSCI_ID_VERSION. 
*/ pr_err("PSCI firmware does not comply with the v1.0 spec.\n"); err = -EOPNOTSUPP; goto out_put_node; } else { pr_info("PSCIv%d.%d detected in firmware.\n", PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); if (PSCI_VERSION_MAJOR(ver) != 1) { err = -EINVAL; pr_err("Conflicting PSCI version detected.\n"); goto out_put_node; } } pr_info("Using standard PSCI v0.2 function IDs\n"); psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND; psci_ops.cpu_suspend = psci_cpu_suspend; psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; psci_ops.cpu_off = psci_cpu_off; psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON; psci_ops.cpu_on = psci_cpu_on; psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE; psci_ops.migrate = psci_migrate; psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO; psci_ops.affinity_info = psci_affinity_info; psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] = PSCI_0_2_FN_MIGRATE_INFO_TYPE; psci_ops.migrate_info_type = psci_migrate_info_type; out_put_node: of_node_put(np); return err; } /* * PSCI Function IDs for v0.2+ are well defined so use * standard values. */ static int psci_0_2_init(struct device_node *np) { int err, ver; err = get_set_conduit_method(np); if (err) goto out_put_node; ver = psci_get_version(); if (ver == PSCI_RET_NOT_SUPPORTED) { /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */ pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); err = -EOPNOTSUPP; goto out_put_node; } else { pr_info("PSCIv%d.%d detected in firmware.\n", PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) { err = -EINVAL; pr_err("Conflicting PSCI version detected.\n"); goto out_put_node; } } pr_info("Using standard PSCI v0.2 function IDs\n"); psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND; psci_ops.cpu_suspend = psci_cpu_suspend; psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; psci_ops.cpu_off = psci_cpu_off; psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON; psci_ops.cpu_on = psci_cpu_on; psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE; psci_ops.migrate = psci_migrate; psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO; psci_ops.affinity_info = psci_affinity_info; psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] = PSCI_0_2_FN_MIGRATE_INFO_TYPE; psci_ops.migrate_info_type = psci_migrate_info_type; arm_pm_restart = psci_sys_reset; pm_power_off = psci_sys_poweroff; out_put_node: of_node_put(np); return err; } /* * PSCI < v0.2 get PSCI Function IDs via DT. 
*/ static int psci_0_1_init(struct device_node *np) { u32 id; int err; err = get_set_conduit_method(np); if (err) goto out_put_node; pr_info("Using PSCI v0.1 Function IDs from DT\n"); if (!of_property_read_u32(np, "cpu_suspend", &id)) { psci_function_id[PSCI_FN_CPU_SUSPEND] = id; psci_ops.cpu_suspend = psci_cpu_suspend; } if (!of_property_read_u32(np, "cpu_off", &id)) { psci_function_id[PSCI_FN_CPU_OFF] = id; psci_ops.cpu_off = psci_cpu_off; } if (!of_property_read_u32(np, "cpu_on", &id)) { psci_function_id[PSCI_FN_CPU_ON] = id; psci_ops.cpu_on = psci_cpu_on; } if (!of_property_read_u32(np, "migrate", &id)) { psci_function_id[PSCI_FN_MIGRATE] = id; psci_ops.migrate = psci_migrate; } out_put_node: of_node_put(np); return err; } static const struct of_device_id psci_of_match[] __initconst = { { .compatible = "arm,psci", .data = psci_0_1_init}, { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, { .compatible = "arm,psci-1.0", .data = psci_1_0_init}, {}, }; int __init psci_init(void) { struct device_node *np; const struct of_device_id *matched_np; psci_initcall_t init_fn; np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); if (!np) return -ENODEV; init_fn = (psci_initcall_t)matched_np->data; return init_fn(np); } #ifdef CONFIG_SMP static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu) { pr_info("Initializing psco_cpu_init\n"); return 0; } static int __init cpu_psci_cpu_prepare(unsigned int cpu) { if (!psci_ops.cpu_on) { pr_err("no cpu_on method, not booting CPU%d\n", cpu); return -ENODEV; } return 0; } static int cpu_psci_cpu_boot(unsigned int cpu) { int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry)); if (err) pr_err("failed to boot CPU%d (%d)\n", cpu, err); return err; } #ifdef CONFIG_HOTPLUG_CPU static int cpu_psci_cpu_disable(unsigned int cpu) { /* Fail early if we don't have CPU_OFF support */ if (!psci_ops.cpu_off) return -EOPNOTSUPP; return 0; } static void cpu_psci_cpu_die(unsigned int cpu) { int ret; /* * There are no known implementations of PSCI actually using the * power state field, pass a sensible default for now. */ struct psci_power_state state = { .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, }; ret = psci_ops.cpu_off(state); pr_crit("unable to power off CPU%u (%d)\n", cpu, ret); } static int cpu_psci_cpu_kill(unsigned int cpu) { int err, i; if (!psci_ops.affinity_info) return 1; /* * cpu_kill could race with cpu_die and we can * potentially end up declaring this cpu undead * while it is dying. So, try again a few times. */ for (i = 0; i < 10; i++) { err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { pr_debug("CPU%d killed.\n", cpu); return 1; } msleep(10); pr_debug("Retrying again to check for CPU kill\n"); } pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", cpu, err); /* Make op_cpu_kill() fail. */ return 0; } #endif static int psci_suspend_finisher(unsigned long state_id) { return psci_ops.cpu_suspend(state_id, virt_to_phys(cpu_resume)); } /* * The PSCI changes are to support Os initiated low power mode where the * cluster mode aggregation happens in HLOS. In this case, the cpuidle * driver aggregates the cluster low power mode will provide in the * composite stateID to be passed down to the PSCI layer. 
*/ static int cpu_psci_cpu_suspend(unsigned long state_id) { if (WARN_ON_ONCE(!state_id)) return -EINVAL; if (state_id & PSCI_POWER_STATE_BIT) return __cpu_suspend(state_id, psci_suspend_finisher); else return psci_ops.cpu_suspend(state_id, 0); } static const struct cpu_operations cpu_psci_ops = { .name = "psci", .cpu_init = cpu_psci_cpu_init, #ifdef CONFIG_ARM64_CPU_SUSPEND .cpu_suspend = cpu_psci_cpu_suspend, #endif .cpu_prepare = cpu_psci_cpu_prepare, .cpu_boot = cpu_psci_cpu_boot, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = cpu_psci_cpu_disable, .cpu_die = cpu_psci_cpu_die, .cpu_kill = cpu_psci_cpu_kill, #endif }; CPU_METHOD_OF_DECLARE(psci, &cpu_psci_ops); #endif
gpl-2.0
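psci_power_state_pack() above assembles the PSCI v0.2 power_state argument by shifting each field into position and masking it: bits [15:0] carry the state id, bit [16] the type, bits [25:24] the affinity level. A standalone sketch of the same packing with those shift/mask values written out numerically (taken from the uapi header; treat the constants as illustrative):

#include <stdio.h>
#include <stdint.h>

/* PSCI 0.2 power_state field layout:
 * bits [15:0] state id, bit [16] type, bits [25:24] affinity level. */
#define PS_ID_SHIFT	0
#define PS_ID_MASK	(0xffffu << PS_ID_SHIFT)
#define PS_TYPE_SHIFT	16
#define PS_TYPE_MASK	(0x1u << PS_TYPE_SHIFT)
#define PS_AFFL_SHIFT	24
#define PS_AFFL_MASK	(0x3u << PS_AFFL_SHIFT)

struct power_state {
	uint16_t id;
	uint8_t type;
	uint8_t affinity_level;
};

static uint32_t power_state_pack(struct power_state s)
{
	return ((s.id << PS_ID_SHIFT) & PS_ID_MASK) |
	       ((s.type << PS_TYPE_SHIFT) & PS_TYPE_MASK) |
	       ((s.affinity_level << PS_AFFL_SHIFT) & PS_AFFL_MASK);
}

int main(void)
{
	/* Power-down request at affinity level 1, state id 0. */
	struct power_state s = { .id = 0, .type = 1, .affinity_level = 1 };
	printf("packed power_state = 0x%08x\n", power_state_pack(s));	/* 0x01010000 */
	return 0;
}

Note that the driver above separately tests PSCI_POWER_STATE_BIT (bit 30) of the composite state id in cpu_psci_cpu_suspend() to decide whether a state is a power-down state that needs the __cpu_suspend() resume path.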
kandycs/kitten-perf
linux/ofed/1.5.1/drivers/infiniband/hw/qib/qib_driver.c
8
22568
/* * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include "qib.h" /* * The size has to be longer than this string, so we can append * board/chip information to it in the init code. */ const char ib_qib_version[] = QIB_IDSTR "\n"; DEFINE_SPINLOCK(qib_devs_lock); LIST_HEAD(qib_dev_list); DEFINE_MUTEX(qib_mutex); /* general driver use */ unsigned qib_debug; module_param_named(debug, qib_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "mask for debug prints"); unsigned qib_ibmtu; module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO); MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096"); unsigned qib_compat_ddr_negotiate = 1; module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(compat_ddr_negotiate, "Attempt pre-IBTA 1.2 DDR speed negotiation"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("QLogic <support@qlogic.com>"); MODULE_DESCRIPTION("QLogic IB driver"); /* * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our * PIO send buffers. This is well beyond anything currently * defined in the InfiniBand spec. */ #define QIB_PIO_MAXIBHDR 128 struct qlogic_ib_stats qib_stats; const char *qib_get_unit_name(int unit) { static char iname[16]; snprintf(iname, sizeof iname, "infinipath%u", unit); return iname; } /* * Return count of units with at least one port ACTIVE. 
*/ int qib_count_active_units(void) { struct qib_devdata *dd; struct qib_pportdata *ppd; unsigned long flags; int pidx, nunits_active = 0; spin_lock_irqsave(&qib_devs_lock, flags); list_for_each_entry(dd, &qib_dev_list, list) { if (!(dd->flags & QIB_PRESENT) || !dd->kregbase) continue; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))) { nunits_active++; break; } } } spin_unlock_irqrestore(&qib_devs_lock, flags); return nunits_active; } /* * Return count of all units, optionally return in arguments * the number of usable (present) units, and the number of * ports that are up. */ int qib_count_units(int *npresentp, int *nupp) { int nunits = 0, npresent = 0, nup = 0; struct qib_devdata *dd; unsigned long flags; int pidx; struct qib_pportdata *ppd; spin_lock_irqsave(&qib_devs_lock, flags); list_for_each_entry(dd, &qib_dev_list, list) { nunits++; if ((dd->flags & QIB_PRESENT) && dd->kregbase) npresent++; for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))) nup++; } } spin_unlock_irqrestore(&qib_devs_lock, flags); if (npresentp) *npresentp = npresent; if (nupp) *nupp = nup; return nunits; } /** * qib_wait_linkstate - wait for an IB link state change to occur * @dd: the qlogic_ib device * @state: the state to wait for * @msecs: the number of milliseconds to wait * * wait up to msecs milliseconds for IB link state change to occur for * now, take the easy polling route. Currently used only by * qib_set_linkstate. Returns 0 if state reached, otherwise * -ETIMEDOUT state can have multiple states set, for any of several * transitions. */ int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs) { int ret; unsigned long flags; spin_lock_irqsave(&ppd->lflags_lock, flags); if (ppd->state_wanted) { spin_unlock_irqrestore(&ppd->lflags_lock, flags); ret = -EBUSY; goto bail; } ppd->state_wanted = state; spin_unlock_irqrestore(&ppd->lflags_lock, flags); wait_event_interruptible_timeout(ppd->state_wait, (ppd->lflags & state), msecs_to_jiffies(msecs)); spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->state_wanted = 0; spin_unlock_irqrestore(&ppd->lflags_lock, flags); if (!(ppd->lflags & state)) ret = -ETIMEDOUT; else ret = 0; bail: return ret; } int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate) { u32 lstate; int ret; struct qib_devdata *dd = ppd->dd; unsigned long flags; switch (newstate) { case QIB_IB_LINKDOWN_ONLY: dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP); /* don't wait */ ret = 0; goto bail; case QIB_IB_LINKDOWN: dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL); /* don't wait */ ret = 0; goto bail; case QIB_IB_LINKDOWN_SLEEP: dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP); /* don't wait */ ret = 0; goto bail; case QIB_IB_LINKDOWN_DISABLE: dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE); /* don't wait */ ret = 0; goto bail; case QIB_IB_LINKARM: if (ppd->lflags & QIBL_LINKARMED) { qib_dbg("Asked for ARM, already there, skip\n"); ret = 0; goto bail; } if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) { qib_dbg("Asked for ARM, lflags %x, error\n", ppd->lflags); ret = -EINVAL; goto bail; } /* * Since the port can be ACTIVE when we ask for ARMED, * clear QIBL_LINKV so we can wait for a transition. 
* If the link isn't ARMED, then something else happened * and there is no point waiting for ARMED. */ spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_LINKV; spin_unlock_irqrestore(&ppd->lflags_lock, flags); dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP); lstate = QIBL_LINKV; break; case QIB_IB_LINKACTIVE: if (ppd->lflags & QIBL_LINKACTIVE) { qib_dbg("Asked for ACTIVE, already there, skip\n"); ret = 0; goto bail; } if (!(ppd->lflags & QIBL_LINKARMED)) { qib_dbg("Asked for ACTIVE, lflags %x, error\n", ppd->lflags); ret = -EINVAL; goto bail; } dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP); lstate = QIBL_LINKACTIVE; break; default: qib_dbg("Invalid linkstate 0x%x requested\n", newstate); ret = -EINVAL; goto bail; } ret = qib_wait_linkstate(ppd, lstate, 10); bail: return ret; } /* * Get address of eager buffer from it's index (allocated in chunks, not * contiguous). */ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) { const u32 chunk = etail / rcd->rcvegrbufs_perchunk; const u32 idx = etail % rcd->rcvegrbufs_perchunk; return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize; } /** * get_rhf_errstring - decode RHF errors * @err: the err number * @msg: the output buffer * @len: the length of the output buffer * * only used one place now, may want more later */ static void get_rhf_errstring(u32 err, char *msg, size_t len) { /* if no errors, and so don't need to check what's first */ *msg = '\0'; if (err & QLOGIC_IB_RHF_H_ICRCERR) strlcat(msg, "icrcerr ", len); if (err & QLOGIC_IB_RHF_H_VCRCERR) strlcat(msg, "vcrcerr ", len); if (err & QLOGIC_IB_RHF_H_PARITYERR) strlcat(msg, "parityerr ", len); if (err & QLOGIC_IB_RHF_H_LENERR) strlcat(msg, "lenerr ", len); if (err & QLOGIC_IB_RHF_H_MTUERR) strlcat(msg, "mtuerr ", len); if (err & QLOGIC_IB_RHF_H_IHDRERR) /* qlogic_ib hdr checksum error */ strlcat(msg, "qibhdrerr ", len); if (err & QLOGIC_IB_RHF_H_TIDERR) strlcat(msg, "tiderr ", len); if (err & QLOGIC_IB_RHF_H_MKERR) /* bad ctxt, offset, etc. */ strlcat(msg, "invalid qibhdr ", len); if (err & QLOGIC_IB_RHF_H_IBERR) strlcat(msg, "iberr ", len); if (err & QLOGIC_IB_RHF_L_SWA) strlcat(msg, "swA ", len); if (err & QLOGIC_IB_RHF_L_SWB) strlcat(msg, "swB ", len); } /* * Returns 1 if error was a CRC, else 0. * Needed for some chip's synthesized error counters. */ static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt, u32 eflags, u32 l, u32 etail, __le32 *rhf_addr, struct qib_message_header *hdr) { char emsg[128]; u32 ret = 0; get_rhf_errstring(eflags, emsg, sizeof emsg); qib_cdbg(ERRPKT, "IB%u:%u ctxt %u RHF %x qtail=%x typ=%u " "tlen=%x opcode=%x egridx=%x: %s\n", ppd->dd->unit, ppd->port, ctxt, eflags, l, qib_hdrget_rcv_type(rhf_addr), qib_hdrget_length_in_bytes(rhf_addr), be32_to_cpu(hdr->bth[0]) >> 24, etail, emsg); if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR)) ret = 1; return ret; } /* * qib_kreceive - receive a packet * @rcd: the qlogic_ib context * @llic: gets count of good packets needed to clear lli, * (used with chips that need need to track crcs for lli) * * called from interrupt handler for errors or receive interrupt * Returns number of CRC error packets, needed by some chips for * local link integrity tracking. crcs are adjusted down by following * good packets, if any, and count of good packets is also tracked. 
*/ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) { struct qib_devdata *dd = rcd->dd; struct qib_pportdata *ppd = rcd->ppd; __le32 *rhf_addr; void *ebuf; const u32 rsize = dd->rcvhdrentsize; /* words */ const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */ u32 etail = -1, l, hdrqtail; struct qib_message_header *hdr; u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0; int last; u64 lval; struct qib_qp *qp, *nqp; l = rcd->head; rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; if (dd->flags & QIB_NODMA_RTAIL) { u32 seq = qib_hdrget_seq(rhf_addr); if (seq != rcd->seq_cnt) { qib_cdbg(PKT, "ctxt%u: hdrq seq diff @ hdrqhd %x\n", rcd->ctxt, rcd->head); goto bail; } hdrqtail = 0; } else { hdrqtail = qib_get_rcvhdrtail(rcd); if (l == hdrqtail) { qib_cdbg(PKT, "ctxt%u: no pkts tail==hdrqhd %x\n", rcd->ctxt, rcd->head); goto bail; } smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ } for (last = 0, i = 1; !last; i += !last) { hdr = dd->f_get_msgheader(dd, rhf_addr); eflags = qib_hdrget_err_flags(rhf_addr); etype = qib_hdrget_rcv_type(rhf_addr); /* total length */ tlen = qib_hdrget_length_in_bytes(rhf_addr); ebuf = NULL; if ((dd->flags & QIB_NODMA_RTAIL) ? qib_hdrget_use_egr_buf(rhf_addr) : (etype != RCVHQ_RCV_TYPE_EXPECTED)) { etail = qib_hdrget_index(rhf_addr); updegr = 1; if (tlen > sizeof(*hdr) || etype >= RCVHQ_RCV_TYPE_NON_KD) ebuf = qib_get_egrbuf(rcd, etail); } if (!eflags) { u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2; if (lrh_len != tlen) { qib_dbg("IB%u:%u ctxt %u lrh_len %u " "!= tlen %u\n", dd->unit, ppd->port, rcd->ctxt, lrh_len, tlen); qib_stats.sps_lenerrs++; goto move_along; } } if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags && ebuf == NULL && tlen > (dd->rcvhdrentsize - 2 + 1 - qib_hdrget_offset(rhf_addr)) << 2) { qib_dbg("IB%d ctxt %u NULL data rhf %08x%08x tlen %u\n", ppd->port, rcd->ctxt, le32_to_cpu(rhf_addr[1]), le32_to_cpu(rhf_addr[0]), tlen); goto move_along; } /* * Both tiderr and qibhdrerr are set for all plain IB * packets; only qibhdrerr should be set. */ if (etype != RCVHQ_RCV_TYPE_NON_KD && etype != RCVHQ_RCV_TYPE_ERROR && qib_hdrget_qib_ver(hdr->iph.ver_ctxt_tid_offset) != IPS_PROTO_VERSION) qib_cdbg(ERRPKT, "Bad InfiniPath protocol version " "%x\n", etype); if (unlikely(eflags)) crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l, etail, rhf_addr, hdr); else if (etype == RCVHQ_RCV_TYPE_NON_KD) { qib_ib_rcv(rcd, hdr, ebuf, tlen); if (crcs) crcs--; else if (llic && *llic) --*llic; } else if (etype == RCVHQ_RCV_TYPE_EXPECTED) qib_cdbg(ERRPKT, "type=Expected pkt, no err bits\n"); else if (etype == RCVHQ_RCV_TYPE_EAGER) { u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24; u32 qpn = be32_to_cpu(hdr->bth[1]) & 0xffffff; qib_cdbg(RVPKT, "typ %x, opcode %x (eager, " "qp=%x), len %x; ignored\n", etype, opcode, qpn, tlen); } else { /* * Error packet, type of error unknown. * Probably type 3, but we don't know, so don't * even try to print the opcode, etc. * Usually caused by a "bad packet", that has no * BTH, when the LRH says it should, or it's * a KD packet with an invalid KDETH. */ qib_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf" " %x, len %x hdrq+%x rhf: %Lx\n", etail, tlen, l, le64_to_cpu(*(__le64 *) rhf_addr)); if (qib_debug & __QIB_ERRPKTDBG) { u32 j, *d, dw = rsize - 2; if (rsize > (tlen >> 2)) dw = tlen >> 2; d = (u32 *)hdr; printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n", dw); for (j = 0; j < dw; j++) printk(KERN_DEBUG "%8x%s", d[j], (j%8) == 7 ? 
"\n" : " "); printk(KERN_DEBUG ".\n"); } } move_along: l += rsize; if (l >= maxcnt) l = 0; rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; if (dd->flags & QIB_NODMA_RTAIL) { u32 seq = qib_hdrget_seq(rhf_addr); if (++rcd->seq_cnt > 13) rcd->seq_cnt = 1; if (seq != rcd->seq_cnt) last = 1; } else if (l == hdrqtail) last = 1; /* * Update head regs etc., every 16 packets, if not last pkt, * to help prevent rcvhdrq overflows, when many packets * are processed and queue is nearly full. * Don't request an interrupt for intermediate updates. */ lval = l; if (!last && !(i & 0xf)) { dd->f_update_usrhead(rcd, lval, updegr, etail); updegr = 0; } } rcd->head = l; rcd->pkt_count += i; /* * Iterate over all QPs waiting to respond. * The list won't change since the IRQ is only run on one CPU. */ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { list_del_init(&qp->rspwait); if (qp->r_flags & QIB_R_RSP_NAK) { qp->r_flags &= ~QIB_R_RSP_NAK; qib_send_rc_ack(qp); } if (qp->r_flags & QIB_R_RSP_SEND) { unsigned long flags; qp->r_flags &= ~QIB_R_RSP_SEND; spin_lock_irqsave(&qp->s_lock, flags); if (ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND) qib_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } qib_cdbg(PKT, "IB%d ctxt %d handled %u packets\n", ppd->port, rcd->ctxt, i); bail: /* Report number of packets consumed */ if (npkts) *npkts = i; /* * Always write head at end, and setup rcv interrupt, even * if no packets were processed. */ lval = (u64)rcd->head | dd->rhdrhead_intr_off; dd->f_update_usrhead(rcd, lval, updegr, etail); return crcs; } /** * qib_set_mtu - set the MTU * @ppd: the perport data * @arg: the new MTU * * We can handle "any" incoming size, the issue here is whether we * need to restrict our outgoing size. For now, we don't do any * sanity checking on this, and we don't deal with what happens to * programs that are already running when the size changes. * NOTE: changing the MTU will usually cause the IBC to go back to * link INIT state... */ int qib_set_mtu(struct qib_pportdata *ppd, u16 arg) { u32 piosize; int ret, chk; if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && arg != 4096) { qib_dbg("Trying to set invalid mtu %u, failing\n", arg); ret = -EINVAL; goto bail; } chk = ib_mtu_enum_to_int(qib_ibmtu); if (chk > 0 && arg > chk) { qib_dbg("Trying to set mtu %u > ibmtu cap %u, failing\n", arg, chk); ret = -EINVAL; goto bail; } piosize = ppd->ibmaxlen; ppd->ibmtu = arg; if (arg >= (piosize - QIB_PIO_MAXIBHDR)) { /* Only if it's not the initial value (or reset to it) */ if (piosize != ppd->init_ibmaxlen) { if (arg > piosize && arg <= ppd->init_ibmaxlen) piosize = ppd->init_ibmaxlen - 2 * sizeof(u32); ppd->ibmaxlen = piosize; } } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) { piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32); ppd->ibmaxlen = piosize; qib_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x " "(mtu 0x%x)\n", ppd->ibmaxlen, piosize, arg); } ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0); ret = 0; bail: return ret; } int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) { struct qib_devdata *dd = ppd->dd; ppd->lid = lid; ppd->lmc = lmc; dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC, lid | (~((1U << lmc) - 1)) << 16); qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid); return 0; } /* * Following deal with the "obviously simple" task of overriding the state * of the LEDS, which normally indicate link physical and logical status. 
* The complications arise in dealing with different hardware mappings * and the board-dependent routine being called from interrupts, and then * there's the requirement to _flash_ them. */ #define LED_OVER_FREQ_SHIFT 8 #define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT) /* Below is "non-zero" to force override, but both actual LEDs are off */ #define LED_OVER_BOTH_OFF (8) static void qib_run_led_override(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; struct qib_devdata *dd = ppd->dd; int timeoff; int ph_idx; if (!(dd->flags & QIB_INITTED)) return; ph_idx = ppd->led_override_phase++ & 1; ppd->led_override = ppd->led_override_vals[ph_idx]; timeoff = ppd->led_override_timeoff; dd->f_setextled(ppd, 1); /* * don't re-fire the timer if user asked for it to be off; we let * it fire one more time after they turn it off to simplify the logic */ if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) mod_timer(&ppd->led_override_timer, jiffies + timeoff); } void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val) { struct qib_devdata *dd = ppd->dd; int timeoff, freq; if (!(dd->flags & QIB_INITTED)) return; /* First check if we are blinking. If not, use 1 Hz polling */ timeoff = HZ; freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT; if (freq) { /* For blink, set each phase from one nybble of val */ ppd->led_override_vals[0] = val & 0xF; ppd->led_override_vals[1] = (val >> 4) & 0xF; timeoff = (HZ << 4)/freq; } else { /* Non-blink set both phases the same. */ ppd->led_override_vals[0] = val & 0xF; ppd->led_override_vals[1] = val & 0xF; } ppd->led_override_timeoff = timeoff; /* * If the timer has not already been started, do so. Use a "quick" * timeout so the function will be called soon, to look at our request. */ if (atomic_inc_return(&ppd->led_override_timer_active) == 1) { /* Need to start timer */ init_timer(&ppd->led_override_timer); ppd->led_override_timer.function = qib_run_led_override; ppd->led_override_timer.data = (unsigned long) ppd; ppd->led_override_timer.expires = jiffies + 1; add_timer(&ppd->led_override_timer); } else { if (ppd->led_override_vals[0] || ppd->led_override_vals[1]) mod_timer(&ppd->led_override_timer, jiffies + 1); atomic_dec(&ppd->led_override_timer_active); } } /** * qib_reset_device - reset the chip if possible * @unit: the device to reset * * Whether or not reset is successful, we attempt to re-initialize the chip * (that is, much like a driver unload/reload). We clear the INITTED flag * so that the various entry points will fail until we reinitialize. 
For * now, we only allow this if no user contexts are open that use chip resources */ int qib_reset_device(int unit) { int ret, i; struct qib_devdata *dd = qib_lookup(unit); struct qib_pportdata *ppd; unsigned long flags; int pidx; if (!dd) { ret = -ENODEV; goto bail; } qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit); if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) { qib_devinfo(dd->pcidev, "Invalid unit number %u or " "not initialized or not present\n", unit); ret = -ENXIO; goto bail; } spin_lock_irqsave(&dd->uctxt_lock, flags); if (dd->rcd) for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) { if (!dd->rcd[i] || !dd->rcd[i]->cnt) continue; spin_unlock_irqrestore(&dd->uctxt_lock, flags); qib_dbg("unit %u ctxt %d is in use (PID %u cmd %s)," " can't reset\n", unit, i, dd->rcd[i]->pid, dd->rcd[i]->comm); ret = -EBUSY; goto bail; } spin_unlock_irqrestore(&dd->uctxt_lock, flags); for (pidx = 0; pidx < dd->num_pports; ++pidx) { ppd = dd->pport + pidx; if (atomic_read(&ppd->led_override_timer_active)) { /* Need to stop LED timer, _then_ shut off LEDs */ del_timer_sync(&ppd->led_override_timer); atomic_set(&ppd->led_override_timer_active, 0); } /* Shut off LEDs after we are sure timer is not running */ ppd->led_override = LED_OVER_BOTH_OFF; dd->f_setextled(ppd, 0); if (dd->flags & QIB_HAS_SEND_DMA) teardown_sdma(ppd); } ret = dd->f_reset(dd); if (ret == 1) { qib_dbg("Reinitializing unit %u after reset attempt\n", unit); ret = qib_init(dd, 1); } else ret = -EAGAIN; if (ret) qib_dev_err(dd, "Reinitialize unit %u after " "reset failed with %d\n", unit, ret); else qib_devinfo(dd->pcidev, "Reinitialized unit %u after " "resetting\n", unit); bail: return ret; }
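/*
 * Illustrative sketch, not part of the original driver: one way a
 * caller could pack the 'val' argument that qib_set_led_override()
 * above decodes. Bits 0-3 select the LEDs for blink phase 0, bits 4-7
 * select them for phase 1, and bits 8-15 carry the frequency field
 * that yields a per-phase timeout of (HZ << 4) / freq jiffies. The
 * helper name is hypothetical.
 */
static inline unsigned int qib_led_override_pack(unsigned int phase0,
						 unsigned int phase1,
						 unsigned int freq)
{
	return (phase0 & 0xF) | ((phase1 & 0xF) << 4) |
	       ((freq << LED_OVER_FREQ_SHIFT) & LED_OVER_FREQ_MASK);
}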
gpl-2.0
mayli/wrapfs-latest
net/batman-adv/gateway_client.c
264
24549
/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: * * Marek Lindner * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "main.h" #include "sysfs.h" #include "gateway_client.h" #include "gateway_common.h" #include "hard-interface.h" #include "originator.h" #include "translation-table.h" #include "routing.h" #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/udp.h> #include <linux/if_vlan.h> /* These are the offsets of the "hw type" and "hw address length" in the dhcp * packet starting at the beginning of the dhcp header */ #define BATADV_DHCP_HTYPE_OFFSET 1 #define BATADV_DHCP_HLEN_OFFSET 2 /* Value of htype representing Ethernet */ #define BATADV_DHCP_HTYPE_ETHERNET 0x01 /* This is the offset of the "chaddr" field in the dhcp packet starting at the * beginning of the dhcp header */ #define BATADV_DHCP_CHADDR_OFFSET 28 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) { if (atomic_dec_and_test(&gw_node->refcount)) { batadv_orig_node_free_ref(gw_node->orig_node); kfree_rcu(gw_node, rcu); } } static struct batadv_gw_node * batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node; rcu_read_lock(); gw_node = rcu_dereference(bat_priv->gw.curr_gw); if (!gw_node) goto out; if (!atomic_inc_not_zero(&gw_node->refcount)) gw_node = NULL; out: rcu_read_unlock(); return gw_node; } struct batadv_orig_node * batadv_gw_get_selected_orig(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node; struct batadv_orig_node *orig_node = NULL; gw_node = batadv_gw_get_selected_gw_node(bat_priv); if (!gw_node) goto out; rcu_read_lock(); orig_node = gw_node->orig_node; if (!orig_node) goto unlock; if (!atomic_inc_not_zero(&orig_node->refcount)) orig_node = NULL; unlock: rcu_read_unlock(); out: if (gw_node) batadv_gw_node_free_ref(gw_node); return orig_node; } static void batadv_gw_select(struct batadv_priv *bat_priv, struct batadv_gw_node *new_gw_node) { struct batadv_gw_node *curr_gw_node; spin_lock_bh(&bat_priv->gw.list_lock); if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) new_gw_node = NULL; curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1); rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node); if (curr_gw_node) batadv_gw_node_free_ref(curr_gw_node); spin_unlock_bh(&bat_priv->gw.list_lock); } /** * batadv_gw_reselect - force a gateway reselection * @bat_priv: the bat priv with all the soft interface information * * Set a flag to remind the GW component to perform a new gateway reselection. * However this function does not ensure that the current gateway is going to be * deselected. The reselection mechanism may elect the same gateway once again. * * This means that invoking batadv_gw_reselect() does not guarantee a gateway * change and therefore a uevent is not necessarily expected. 
*/ void batadv_gw_reselect(struct batadv_priv *bat_priv) { atomic_set(&bat_priv->gw.reselect, 1); } static struct batadv_gw_node * batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) { struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo; struct batadv_gw_node *gw_node, *curr_gw = NULL; uint32_t max_gw_factor = 0, tmp_gw_factor = 0; uint32_t gw_divisor; uint8_t max_tq = 0; uint8_t tq_avg; struct batadv_orig_node *orig_node; gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; gw_divisor *= 64; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) continue; orig_node = gw_node->orig_node; router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router) continue; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto next; if (!atomic_inc_not_zero(&gw_node->refcount)) goto next; tq_avg = router_ifinfo->bat_iv.tq_avg; switch (atomic_read(&bat_priv->gw_sel_class)) { case 1: /* fast connection */ tmp_gw_factor = tq_avg * tq_avg; tmp_gw_factor *= gw_node->bandwidth_down; tmp_gw_factor *= 100 * 100; tmp_gw_factor /= gw_divisor; if ((tmp_gw_factor > max_gw_factor) || ((tmp_gw_factor == max_gw_factor) && (tq_avg > max_tq))) { if (curr_gw) batadv_gw_node_free_ref(curr_gw); curr_gw = gw_node; atomic_inc(&curr_gw->refcount); } break; default: /* 2: stable connection (use best statistic) * 3: fast-switch (use best statistic but change as * soon as a better gateway appears) * XX: late-switch (use best statistic but change as * soon as a better gateway appears which has * $routing_class more tq points) */ if (tq_avg > max_tq) { if (curr_gw) batadv_gw_node_free_ref(curr_gw); curr_gw = gw_node; atomic_inc(&curr_gw->refcount); } break; } if (tq_avg > max_tq) max_tq = tq_avg; if (tmp_gw_factor > max_gw_factor) max_gw_factor = tmp_gw_factor; batadv_gw_node_free_ref(gw_node); next: batadv_neigh_node_free_ref(router); if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); } rcu_read_unlock(); return curr_gw; } /** * batadv_gw_check_client_stop - check if client mode has been switched off * @bat_priv: the bat priv with all the soft interface information * * This function assumes the caller has checked that the gw state *is actually * changing*. This function is not supposed to be called when there is no state * change. 
*/ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw; if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) return; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!curr_gw) return; /* deselect the current gateway so that next time that client mode is * enabled a proper GW_ADD event can be sent */ batadv_gw_select(bat_priv, NULL); /* if batman-adv is switching the gw client mode off and a gateway was * already selected, send a DEL uevent */ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); batadv_gw_node_free_ref(curr_gw); } void batadv_gw_election(struct batadv_priv *bat_priv) { struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL; struct batadv_neigh_node *router = NULL; struct batadv_neigh_ifinfo *router_ifinfo = NULL; char gw_addr[18] = { '\0' }; if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw) goto out; next_gw = batadv_gw_get_best_gw_node(bat_priv); if (curr_gw == next_gw) goto out; if (next_gw) { sprintf(gw_addr, "%pM", next_gw->orig_node->orig); router = batadv_orig_router_get(next_gw->orig_node, BATADV_IF_DEFAULT); if (!router) { batadv_gw_reselect(bat_priv); goto out; } router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) { batadv_gw_reselect(bat_priv); goto out; } } if ((curr_gw) && (!next_gw)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Removing selected gateway - no gateway in range\n"); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL); } else if ((!curr_gw) && (next_gw)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n", next_gw->orig_node->orig, next_gw->bandwidth_down / 10, next_gw->bandwidth_down % 10, next_gw->bandwidth_up / 10, next_gw->bandwidth_up % 10, router_ifinfo->bat_iv.tq_avg); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD, gw_addr); } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n", next_gw->orig_node->orig, next_gw->bandwidth_down / 10, next_gw->bandwidth_down % 10, next_gw->bandwidth_up / 10, next_gw->bandwidth_up % 10, router_ifinfo->bat_iv.tq_avg); batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE, gw_addr); } batadv_gw_select(bat_priv, next_gw); out: if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (next_gw) batadv_gw_node_free_ref(next_gw); if (router) batadv_neigh_node_free_ref(router); if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); } void batadv_gw_check_election(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_neigh_ifinfo *router_orig_tq = NULL; struct batadv_neigh_ifinfo *router_gw_tq = NULL; struct batadv_orig_node *curr_gw_orig; struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL; uint8_t gw_tq_avg, orig_tq_avg; curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); if (!curr_gw_orig) goto reselect; router_gw = batadv_orig_router_get(curr_gw_orig, BATADV_IF_DEFAULT); if (!router_gw) goto reselect; router_gw_tq = batadv_neigh_ifinfo_get(router_gw, BATADV_IF_DEFAULT); if (!router_gw_tq) goto reselect; /* this node already is the gateway */ if (curr_gw_orig == orig_node) goto out; router_orig = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT); if (!router_orig) goto out; router_orig_tq = batadv_neigh_ifinfo_get(router_orig, BATADV_IF_DEFAULT); if 
(!router_orig_tq) goto out; gw_tq_avg = router_gw_tq->bat_iv.tq_avg; orig_tq_avg = router_orig_tq->bat_iv.tq_avg; /* the TQ value has to be better */ if (orig_tq_avg < gw_tq_avg) goto out; /* if the routing class is greater than 3 the value tells us how much * greater the TQ value of the new gateway must be */ if ((atomic_read(&bat_priv->gw_sel_class) > 3) && (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", gw_tq_avg, orig_tq_avg); reselect: batadv_gw_reselect(bat_priv); out: if (curr_gw_orig) batadv_orig_node_free_ref(curr_gw_orig); if (router_gw) batadv_neigh_node_free_ref(router_gw); if (router_orig) batadv_neigh_node_free_ref(router_orig); if (router_gw_tq) batadv_neigh_ifinfo_free_ref(router_gw_tq); if (router_orig_tq) batadv_neigh_ifinfo_free_ref(router_orig_tq); } /** * batadv_gw_node_add - add gateway node to list of available gateways * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ static void batadv_gw_node_add(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_gateway_data *gateway) { struct batadv_gw_node *gw_node; if (gateway->bandwidth_down == 0) return; if (!atomic_inc_not_zero(&orig_node->refcount)) return; gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); if (!gw_node) { batadv_orig_node_free_ref(orig_node); return; } INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; atomic_set(&gw_node->refcount, 1); spin_lock_bh(&bat_priv->gw.list_lock); hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list); spin_unlock_bh(&bat_priv->gw.list_lock); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", orig_node->orig, ntohl(gateway->bandwidth_down) / 10, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, ntohl(gateway->bandwidth_up) % 10); } /** * batadv_gw_node_get - retrieve gateway node from list of available gateways * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * * Returns gateway node if found or NULL otherwise. 
*/ static struct batadv_gw_node * batadv_gw_node_get(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_gw_node *gw_node_tmp, *gw_node = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) { if (gw_node_tmp->orig_node != orig_node) continue; if (gw_node_tmp->deleted) continue; if (!atomic_inc_not_zero(&gw_node_tmp->refcount)) continue; gw_node = gw_node_tmp; break; } rcu_read_unlock(); return gw_node; } /** * batadv_gw_node_update - update list of available gateways with changed * bandwidth information * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ void batadv_gw_node_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, struct batadv_tvlv_gateway_data *gateway) { struct batadv_gw_node *gw_node, *curr_gw = NULL; gw_node = batadv_gw_node_get(bat_priv, orig_node); if (!gw_node) { batadv_gw_node_add(bat_priv, orig_node, gateway); goto out; } if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) && (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))) goto out; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n", orig_node->orig, gw_node->bandwidth_down / 10, gw_node->bandwidth_down % 10, gw_node->bandwidth_up / 10, gw_node->bandwidth_up % 10, ntohl(gateway->bandwidth_down) / 10, ntohl(gateway->bandwidth_down) % 10, ntohl(gateway->bandwidth_up) / 10, ntohl(gateway->bandwidth_up) % 10); gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); gw_node->deleted = 0; if (ntohl(gateway->bandwidth_down) == 0) { gw_node->deleted = jiffies; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Gateway %pM removed from gateway list\n", orig_node->orig); /* Note: We don't need a NULL check here, since curr_gw never * gets dereferenced. 
*/ curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (gw_node == curr_gw) batadv_gw_reselect(bat_priv); } out: if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (gw_node) batadv_gw_node_free_ref(gw_node); } void batadv_gw_node_delete(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node) { struct batadv_tvlv_gateway_data gateway; gateway.bandwidth_down = 0; gateway.bandwidth_up = 0; batadv_gw_node_update(bat_priv, orig_node, &gateway); } void batadv_gw_node_purge(struct batadv_priv *bat_priv) { struct batadv_gw_node *gw_node, *curr_gw; struct hlist_node *node_tmp; unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); int do_reselect = 0; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); spin_lock_bh(&bat_priv->gw.list_lock); hlist_for_each_entry_safe(gw_node, node_tmp, &bat_priv->gw.list, list) { if (((!gw_node->deleted) || (time_before(jiffies, gw_node->deleted + timeout))) && atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) continue; if (curr_gw == gw_node) do_reselect = 1; hlist_del_rcu(&gw_node->list); batadv_gw_node_free_ref(gw_node); } spin_unlock_bh(&bat_priv->gw.list_lock); /* gw_reselect() needs to acquire the gw_list_lock */ if (do_reselect) batadv_gw_reselect(bat_priv); if (curr_gw) batadv_gw_node_free_ref(curr_gw); } /* fails if orig_node has no router */ static int batadv_write_buffer_text(struct batadv_priv *bat_priv, struct seq_file *seq, const struct batadv_gw_node *gw_node) { struct batadv_gw_node *curr_gw; struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo = NULL; int ret = -1; router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT); if (!router) goto out; router_ifinfo = batadv_neigh_ifinfo_get(router, BATADV_IF_DEFAULT); if (!router_ifinfo) goto out; curr_gw = batadv_gw_get_selected_gw_node(bat_priv); ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n", (curr_gw == gw_node ? "=>" : " "), gw_node->orig_node->orig, router_ifinfo->bat_iv.tq_avg, router->addr, router->if_incoming->net_dev->name, gw_node->bandwidth_down / 10, gw_node->bandwidth_down % 10, gw_node->bandwidth_up / 10, gw_node->bandwidth_up % 10); if (curr_gw) batadv_gw_node_free_ref(curr_gw); out: if (router_ifinfo) batadv_neigh_ifinfo_free_ref(router_ifinfo); if (router) batadv_neigh_node_free_ref(router); return ret; } int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hard_iface *primary_if; struct batadv_gw_node *gw_node; int gw_count = 0; primary_if = batadv_seq_print_text_primary_if_get(seq); if (!primary_if) goto out; seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. 
adv %s, MainIF/MAC: %s/%pM (%s)]\n", "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name); rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) continue; /* fails if orig_node has no router */ if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0) continue; gw_count++; } rcu_read_unlock(); if (gw_count == 0) seq_puts(seq, "No gateways in range ...\n"); out: if (primary_if) batadv_hardif_free_ref(primary_if); return 0; } /** * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message * @skb: the packet to check * @header_len: a pointer to the batman-adv header size * @chaddr: buffer where the client address will be stored. Valid * only if the function returns BATADV_DHCP_TO_CLIENT * * Returns: * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error * while parsing it * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client * * This function may re-allocate the data buffer of the skb passed as argument. */ enum batadv_dhcp_recipient batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, uint8_t *chaddr) { enum batadv_dhcp_recipient ret = BATADV_DHCP_NO; struct ethhdr *ethhdr; struct iphdr *iphdr; struct ipv6hdr *ipv6hdr; struct udphdr *udphdr; struct vlan_ethhdr *vhdr; int chaddr_offset; __be16 proto; uint8_t *p; /* check for ethernet header */ if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) return BATADV_DHCP_NO; ethhdr = eth_hdr(skb); proto = ethhdr->h_proto; *header_len += ETH_HLEN; /* check for initial vlan header */ if (proto == htons(ETH_P_8021Q)) { if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) return BATADV_DHCP_NO; vhdr = vlan_eth_hdr(skb); proto = vhdr->h_vlan_encapsulated_proto; *header_len += VLAN_HLEN; } /* check for ip header */ switch (proto) { case htons(ETH_P_IP): if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) return BATADV_DHCP_NO; iphdr = (struct iphdr *)(skb->data + *header_len); *header_len += iphdr->ihl * 4; /* check for udp header */ if (iphdr->protocol != IPPROTO_UDP) return BATADV_DHCP_NO; break; case htons(ETH_P_IPV6): if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) return BATADV_DHCP_NO; ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); *header_len += sizeof(*ipv6hdr); /* check for udp header */ if (ipv6hdr->nexthdr != IPPROTO_UDP) return BATADV_DHCP_NO; break; default: return BATADV_DHCP_NO; } if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) return BATADV_DHCP_NO; /* skb->data might have been reallocated by pskb_may_pull() */ ethhdr = eth_hdr(skb); if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); udphdr = (struct udphdr *)(skb->data + *header_len); *header_len += sizeof(*udphdr); /* check for bootp port */ switch (proto) { case htons(ETH_P_IP): if (udphdr->dest == htons(67)) ret = BATADV_DHCP_TO_SERVER; else if (udphdr->source == htons(67)) ret = BATADV_DHCP_TO_CLIENT; break; case htons(ETH_P_IPV6): if (udphdr->dest == htons(547)) ret = BATADV_DHCP_TO_SERVER; else if (udphdr->source == htons(547)) ret = BATADV_DHCP_TO_CLIENT; break; } chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ if (ret == BATADV_DHCP_TO_CLIENT && pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { /* check if the DHCP packet carries an Ethernet DHCP */ p = 
skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) return BATADV_DHCP_NO; /* check if the DHCP packet carries a valid Ethernet address */ p = skb->data + *header_len + BATADV_DHCP_HLEN_OFFSET; if (*p != ETH_ALEN) return BATADV_DHCP_NO; ether_addr_copy(chaddr, skb->data + chaddr_offset); } return ret; } /** * batadv_gw_out_of_range - check if the dhcp request destination is the best gw * @bat_priv: the bat priv with all the soft interface information * @skb: the outgoing packet * * Check if the skb is a DHCP request and if it is sent to the current best GW * server. Due to topology changes it may be the case that the GW server * previously selected is not the best one anymore. * * Returns true if the packet destination is unicast and it is not the best gw, * false otherwise. * * This call might reallocate skb data. * Must be invoked only when the DHCP packet is going TO a DHCP SERVER. */ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; struct batadv_orig_node *orig_dst_node = NULL; struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL; struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo; struct ethhdr *ethhdr = (struct ethhdr *)skb->data; bool out_of_range = false; uint8_t curr_tq_avg; unsigned short vid; vid = batadv_get_vid(skb, 0); orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, ethhdr->h_dest, vid); if (!orig_dst_node) goto out; gw_node = batadv_gw_node_get(bat_priv, orig_dst_node); /* destination is not a gateway */ if (!gw_node) goto out; switch (atomic_read(&bat_priv->gw_mode)) { case BATADV_GW_MODE_SERVER: /* If we are a GW then we are our best GW. We can artificially * set the tq towards ourself as the maximum value */ curr_tq_avg = BATADV_TQ_MAX_VALUE; break; case BATADV_GW_MODE_CLIENT: curr_gw = batadv_gw_get_selected_gw_node(bat_priv); if (!curr_gw) goto out; /* packet is going to our gateway */ if (curr_gw->orig_node == orig_dst_node) goto out; /* If the dhcp packet has been sent to a different gw, * we have to evaluate whether the old gw is still * reliable enough */ neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node, NULL); if (!neigh_curr) goto out; curr_ifinfo = batadv_neigh_ifinfo_get(neigh_curr, BATADV_IF_DEFAULT); if (!curr_ifinfo) goto out; curr_tq_avg = curr_ifinfo->bat_iv.tq_avg; batadv_neigh_ifinfo_free_ref(curr_ifinfo); break; case BATADV_GW_MODE_OFF: default: goto out; } neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL); if (!neigh_old) goto out; old_ifinfo = batadv_neigh_ifinfo_get(neigh_old, BATADV_IF_DEFAULT); if (!old_ifinfo) goto out; if ((curr_tq_avg - old_ifinfo->bat_iv.tq_avg) > BATADV_GW_THRESHOLD) out_of_range = true; batadv_neigh_ifinfo_free_ref(old_ifinfo); out: if (orig_dst_node) batadv_orig_node_free_ref(orig_dst_node); if (curr_gw) batadv_gw_node_free_ref(curr_gw); if (gw_node) batadv_gw_node_free_ref(gw_node); if (neigh_old) batadv_neigh_node_free_ref(neigh_old); if (neigh_curr) batadv_neigh_node_free_ref(neigh_curr); return out_of_range; }
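/*
 * Illustrative sketch, not part of the original file: the fixed
 * RFC 2131 DHCP header layout that the BATADV_DHCP_*_OFFSET constants
 * above index into. Offsets are relative to the start of the DHCP
 * payload, i.e. right after the UDP header. The struct name is
 * hypothetical and the type is for reference only.
 */
struct batadv_dhcp_header_sketch {
	__u8	op;		/* offset 0: BOOTREQUEST/BOOTREPLY */
	__u8	htype;		/* offset 1: BATADV_DHCP_HTYPE_OFFSET */
	__u8	hlen;		/* offset 2: BATADV_DHCP_HLEN_OFFSET */
	__u8	hops;		/* offset 3 */
	__be32	xid;		/* offset 4: transaction id */
	__be16	secs;		/* offset 8 */
	__be16	flags;		/* offset 10 */
	__be32	ciaddr;		/* offset 12: client IP address */
	__be32	yiaddr;		/* offset 16: 'your' (client) IP address */
	__be32	siaddr;		/* offset 20: next server IP address */
	__be32	giaddr;		/* offset 24: relay agent IP address */
	__u8	chaddr[16];	/* offset 28: BATADV_DHCP_CHADDR_OFFSET */
};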
gpl-2.0
dholm/linux-ev3
net/sunrpc/xprtrdma/svc_rdma_transport.c
520
38702
/* * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Author: Tom Tucker <tom@opengridcomputing.com> */ #include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/debug.h> #include <linux/sunrpc/rpc_rdma.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <linux/sunrpc/svc_rdma.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct sockaddr *sa, int salen, int flags); static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); static void svc_rdma_release_rqst(struct svc_rqst *); static void dto_tasklet_func(unsigned long data); static void svc_rdma_detach(struct svc_xprt *xprt); static void svc_rdma_free(struct svc_xprt *xprt); static int svc_rdma_has_wspace(struct svc_xprt *xprt); static void rq_cq_reap(struct svcxprt_rdma *xprt); static void sq_cq_reap(struct svcxprt_rdma *xprt); static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL); static DEFINE_SPINLOCK(dto_lock); static LIST_HEAD(dto_xprt_q); static struct svc_xprt_ops svc_rdma_ops = { .xpo_create = svc_rdma_create, .xpo_recvfrom = svc_rdma_recvfrom, .xpo_sendto = svc_rdma_sendto, .xpo_release_rqst = svc_rdma_release_rqst, .xpo_detach = svc_rdma_detach, .xpo_free = svc_rdma_free, .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr, .xpo_has_wspace = svc_rdma_has_wspace, .xpo_accept = svc_rdma_accept, }; struct svc_xprt_class svc_rdma_class = { .xcl_name = "rdma", .xcl_owner = THIS_MODULE, .xcl_ops = &svc_rdma_ops, .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, }; /* WR context cache. 
Created in svc_rdma.c */ extern struct kmem_cache *svc_rdma_ctxt_cachep; struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) { struct svc_rdma_op_ctxt *ctxt; while (1) { ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL); if (ctxt) break; schedule_timeout_uninterruptible(msecs_to_jiffies(500)); } ctxt->xprt = xprt; INIT_LIST_HEAD(&ctxt->dto_q); ctxt->count = 0; ctxt->frmr = NULL; atomic_inc(&xprt->sc_ctxt_used); return ctxt; } void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt) { struct svcxprt_rdma *xprt = ctxt->xprt; int i; for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) { /* * Unmap the DMA addr in the SGE if the lkey matches * the sc_dma_lkey, otherwise, ignore it since it is * an FRMR lkey and will be unmapped later when the * last WR that uses it completes. */ if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) { atomic_dec(&xprt->sc_dma_used); ib_dma_unmap_single(xprt->sc_cm_id->device, ctxt->sge[i].addr, ctxt->sge[i].length, ctxt->direction); } } } void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) { struct svcxprt_rdma *xprt; int i; BUG_ON(!ctxt); xprt = ctxt->xprt; if (free_pages) for (i = 0; i < ctxt->count; i++) put_page(ctxt->pages[i]); kmem_cache_free(svc_rdma_ctxt_cachep, ctxt); atomic_dec(&xprt->sc_ctxt_used); } /* Temporary NFS request map cache. Created in svc_rdma.c */ extern struct kmem_cache *svc_rdma_map_cachep; /* * Temporary NFS req mappings are shared across all transport * instances. These are short lived and should be bounded by the number * of concurrent server threads * depth of the SQ. */ struct svc_rdma_req_map *svc_rdma_get_req_map(void) { struct svc_rdma_req_map *map; while (1) { map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL); if (map) break; schedule_timeout_uninterruptible(msecs_to_jiffies(500)); } map->count = 0; map->frmr = NULL; return map; } void svc_rdma_put_req_map(struct svc_rdma_req_map *map) { kmem_cache_free(svc_rdma_map_cachep, map); } /* ib_cq event handler */ static void cq_event_handler(struct ib_event *event, void *context) { struct svc_xprt *xprt = context; dprintk("svcrdma: received CQ event id=%d, context=%p\n", event->event, context); set_bit(XPT_CLOSE, &xprt->xpt_flags); } /* QP event handler */ static void qp_event_handler(struct ib_event *event, void *context) { struct svc_xprt *xprt = context; switch (event->event) { /* These are considered benign events */ case IB_EVENT_PATH_MIG: case IB_EVENT_COMM_EST: case IB_EVENT_SQ_DRAINED: case IB_EVENT_QP_LAST_WQE_REACHED: dprintk("svcrdma: QP event %d received for QP=%p\n", event->event, event->element.qp); break; /* These are considered fatal events */ case IB_EVENT_PATH_MIG_ERR: case IB_EVENT_QP_FATAL: case IB_EVENT_QP_REQ_ERR: case IB_EVENT_QP_ACCESS_ERR: case IB_EVENT_DEVICE_FATAL: default: dprintk("svcrdma: QP ERROR event %d received for QP=%p, " "closing transport\n", event->event, event->element.qp); set_bit(XPT_CLOSE, &xprt->xpt_flags); break; } } /* * Data Transfer Operation Tasklet * * Walks a list of transports with I/O pending, removing entries as * they are added to the server's I/O pending list. Two bits indicate * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave * spinlock that serializes access to the transport list with the RQ * and SQ interrupt handlers. 
*/ static void dto_tasklet_func(unsigned long data) { struct svcxprt_rdma *xprt; unsigned long flags; spin_lock_irqsave(&dto_lock, flags); while (!list_empty(&dto_xprt_q)) { xprt = list_entry(dto_xprt_q.next, struct svcxprt_rdma, sc_dto_q); list_del_init(&xprt->sc_dto_q); spin_unlock_irqrestore(&dto_lock, flags); rq_cq_reap(xprt); sq_cq_reap(xprt); svc_xprt_put(&xprt->sc_xprt); spin_lock_irqsave(&dto_lock, flags); } spin_unlock_irqrestore(&dto_lock, flags); } /* * Receive Queue Completion Handler * * Since an RQ completion handler is called on interrupt context, we * need to defer the handling of the I/O to a tasklet */ static void rq_comp_handler(struct ib_cq *cq, void *cq_context) { struct svcxprt_rdma *xprt = cq_context; unsigned long flags; /* Guard against unconditional flush call for destroyed QP */ if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) return; /* * Set the bit regardless of whether or not it's on the list * because it may be on the list already due to an SQ * completion. */ set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags); /* * If this transport is not already on the DTO transport queue, * add it */ spin_lock_irqsave(&dto_lock, flags); if (list_empty(&xprt->sc_dto_q)) { svc_xprt_get(&xprt->sc_xprt); list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); } spin_unlock_irqrestore(&dto_lock, flags); /* Tasklet does all the work to avoid irqsave locks. */ tasklet_schedule(&dto_tasklet); } /* * rq_cq_reap - Process the RQ CQ. * * Take all completing WC off the CQE and enqueue the associated DTO * context on the dto_q for the transport. * * Note that caller must hold a transport reference. */ static void rq_cq_reap(struct svcxprt_rdma *xprt) { int ret; struct ib_wc wc; struct svc_rdma_op_ctxt *ctxt = NULL; if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags)) return; ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); atomic_inc(&rdma_stat_rq_poll); while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; ctxt->wc_status = wc.status; ctxt->byte_len = wc.byte_len; svc_rdma_unmap_dma(ctxt); if (wc.status != IB_WC_SUCCESS) { /* Close the transport */ dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt); set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); svc_rdma_put_context(ctxt, 1); svc_xprt_put(&xprt->sc_xprt); continue; } spin_lock_bh(&xprt->sc_rq_dto_lock); list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q); spin_unlock_bh(&xprt->sc_rq_dto_lock); svc_xprt_put(&xprt->sc_xprt); } if (ctxt) atomic_inc(&rdma_stat_rq_prod); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); /* * If data arrived before established event, * don't enqueue. This defers RPC I/O until the * RDMA connection is complete. 
*/ if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) svc_xprt_enqueue(&xprt->sc_xprt); } /* * Process a completion context */ static void process_context(struct svcxprt_rdma *xprt, struct svc_rdma_op_ctxt *ctxt) { svc_rdma_unmap_dma(ctxt); switch (ctxt->wr_op) { case IB_WR_SEND: if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags)) svc_rdma_put_frmr(xprt, ctxt->frmr); svc_rdma_put_context(ctxt, 1); break; case IB_WR_RDMA_WRITE: svc_rdma_put_context(ctxt, 0); break; case IB_WR_RDMA_READ: case IB_WR_RDMA_READ_WITH_INV: if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) { struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr; BUG_ON(!read_hdr); if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags)) svc_rdma_put_frmr(xprt, ctxt->frmr); spin_lock_bh(&xprt->sc_rq_dto_lock); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); list_add_tail(&read_hdr->dto_q, &xprt->sc_read_complete_q); spin_unlock_bh(&xprt->sc_rq_dto_lock); svc_xprt_enqueue(&xprt->sc_xprt); } svc_rdma_put_context(ctxt, 0); break; default: printk(KERN_ERR "svcrdma: unexpected completion type, " "opcode=%d\n", ctxt->wr_op); break; } } /* * Send Queue Completion Handler - potentially called on interrupt context. * * Note that caller must hold a transport reference. */ static void sq_cq_reap(struct svcxprt_rdma *xprt) { struct svc_rdma_op_ctxt *ctxt = NULL; struct ib_wc wc; struct ib_cq *cq = xprt->sc_sq_cq; int ret; if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) return; ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); atomic_inc(&rdma_stat_sq_poll); while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { if (wc.status != IB_WC_SUCCESS) /* Close the transport */ set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); /* Decrement used SQ WR count */ atomic_dec(&xprt->sc_sq_count); wake_up(&xprt->sc_send_wait); ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; if (ctxt) process_context(xprt, ctxt); svc_xprt_put(&xprt->sc_xprt); } if (ctxt) atomic_inc(&rdma_stat_sq_prod); } static void sq_comp_handler(struct ib_cq *cq, void *cq_context) { struct svcxprt_rdma *xprt = cq_context; unsigned long flags; /* Guard against unconditional flush call for destroyed QP */ if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) return; /* * Set the bit regardless of whether or not it's on the list * because it may be on the list already due to an RQ * completion. */ set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags); /* * If this transport is not already on the DTO transport queue, * add it */ spin_lock_irqsave(&dto_lock, flags); if (list_empty(&xprt->sc_dto_q)) { svc_xprt_get(&xprt->sc_xprt); list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); } spin_unlock_irqrestore(&dto_lock, flags); /* Tasklet does all the work to avoid irqsave locks. 
*/ tasklet_schedule(&dto_tasklet); } static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, int listener) { struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL); if (!cma_xprt) return NULL; svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv); INIT_LIST_HEAD(&cma_xprt->sc_accept_q); INIT_LIST_HEAD(&cma_xprt->sc_dto_q); INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q); INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q); INIT_LIST_HEAD(&cma_xprt->sc_frmr_q); init_waitqueue_head(&cma_xprt->sc_send_wait); spin_lock_init(&cma_xprt->sc_lock); spin_lock_init(&cma_xprt->sc_rq_dto_lock); spin_lock_init(&cma_xprt->sc_frmr_q_lock); cma_xprt->sc_ord = svcrdma_ord; cma_xprt->sc_max_req_size = svcrdma_max_req_size; cma_xprt->sc_max_requests = svcrdma_max_requests; cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT; atomic_set(&cma_xprt->sc_sq_count, 0); atomic_set(&cma_xprt->sc_ctxt_used, 0); if (listener) set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags); return cma_xprt; } struct page *svc_rdma_get_page(void) { struct page *page; while ((page = alloc_page(GFP_KERNEL)) == NULL) { /* If we can't get memory, wait a bit and try again */ printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 " "jiffies.\n"); schedule_timeout_uninterruptible(msecs_to_jiffies(1000)); } return page; } int svc_rdma_post_recv(struct svcxprt_rdma *xprt) { struct ib_recv_wr recv_wr, *bad_recv_wr; struct svc_rdma_op_ctxt *ctxt; struct page *page; dma_addr_t pa; int sge_no; int buflen; int ret; ctxt = svc_rdma_get_context(xprt); buflen = 0; ctxt->direction = DMA_FROM_DEVICE; for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) { BUG_ON(sge_no >= xprt->sc_max_sge); page = svc_rdma_get_page(); ctxt->pages[sge_no] = page; pa = ib_dma_map_single(xprt->sc_cm_id->device, page_address(page), PAGE_SIZE, DMA_FROM_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa)) goto err_put_ctxt; atomic_inc(&xprt->sc_dma_used); ctxt->sge[sge_no].addr = pa; ctxt->sge[sge_no].length = PAGE_SIZE; ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; buflen += PAGE_SIZE; } ctxt->count = sge_no; recv_wr.next = NULL; recv_wr.sg_list = &ctxt->sge[0]; recv_wr.num_sge = ctxt->count; recv_wr.wr_id = (u64)(unsigned long)ctxt; svc_xprt_get(&xprt->sc_xprt); ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr); if (ret) { svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); svc_xprt_put(&xprt->sc_xprt); } return ret; err_put_ctxt: svc_rdma_put_context(ctxt, 1); return -ENOMEM; } /* * This function handles the CONNECT_REQUEST event on a listening * endpoint. It is passed the cma_id for the _new_ connection. The context in * this cma_id is inherited from the listening cma_id and is the svc_xprt * structure for the listening endpoint. * * This function creates a new xprt for the new connection and enqueues it on * the accept queue for the listening xprt. When the listen thread is kicked, it * will call the recvfrom method on the listen xprt which will accept the new * connection. 
*/ static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird) { struct svcxprt_rdma *listen_xprt = new_cma_id->context; struct svcxprt_rdma *newxprt; struct sockaddr *sa; /* Create a new transport */ newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0); if (!newxprt) { dprintk("svcrdma: failed to create new transport\n"); return; } newxprt->sc_cm_id = new_cma_id; new_cma_id->context = newxprt; dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n", newxprt, newxprt->sc_cm_id, listen_xprt); /* Save client advertised inbound read limit for use later in accept. */ newxprt->sc_ord = client_ird; /* Set the local and remote addresses in the transport */ sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr; svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa)); /* * Enqueue the new transport on the accept queue of the listening * transport */ spin_lock_bh(&listen_xprt->sc_lock); list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q); spin_unlock_bh(&listen_xprt->sc_lock); /* * Can't use svc_xprt_received here because we are not on a * rqstp thread */ set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags); svc_xprt_enqueue(&listen_xprt->sc_xprt); } /* * Handles events generated on the listening endpoint. These events will * either be incoming connect requests or adapter removal events. */ static int rdma_listen_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { struct svcxprt_rdma *xprt = cma_id->context; int ret = 0; switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, " "event=%d\n", cma_id, cma_id->context, event->event); handle_connect_req(cma_id, event->param.conn.initiator_depth); break; case RDMA_CM_EVENT_ESTABLISHED: /* Accept complete */ dprintk("svcrdma: Connection completed on LISTEN xprt=%p, " "cm_id=%p\n", xprt, cma_id); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n", xprt, cma_id); if (xprt) set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); break; default: dprintk("svcrdma: Unexpected event on listening endpoint %p, " "event=%d\n", cma_id, event->event); break; } return ret; } static int rdma_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { struct svc_xprt *xprt = cma_id->context; struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); switch (event->event) { case RDMA_CM_EVENT_ESTABLISHED: /* Accept complete */ svc_xprt_get(xprt); dprintk("svcrdma: Connection completed on DTO xprt=%p, " "cm_id=%p\n", xprt, cma_id); clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags); svc_xprt_enqueue(xprt); break; case RDMA_CM_EVENT_DISCONNECTED: dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n", xprt, cma_id); if (xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } break; case RDMA_CM_EVENT_DEVICE_REMOVAL: dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, " "event=%d\n", cma_id, xprt, event->event); if (xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); } break; default: dprintk("svcrdma: Unexpected event on DTO endpoint %p, " "event=%d\n", cma_id, event->event); break; } return 0; } /* * Create a listening RDMA service endpoint. 
*/ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct sockaddr *sa, int salen, int flags) { struct rdma_cm_id *listen_id; struct svcxprt_rdma *cma_xprt; struct svc_xprt *xprt; int ret; dprintk("svcrdma: Creating RDMA socket\n"); cma_xprt = rdma_create_xprt(serv, 1); if (!cma_xprt) return ERR_PTR(-ENOMEM); xprt = &cma_xprt->sc_xprt; listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); if (IS_ERR(listen_id)) { ret = PTR_ERR(listen_id); dprintk("svcrdma: rdma_create_id failed = %d\n", ret); goto err0; } ret = rdma_bind_addr(listen_id, sa); if (ret) { dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret); goto err1; } cma_xprt->sc_cm_id = listen_id; ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG); if (ret) { dprintk("svcrdma: rdma_listen failed = %d\n", ret); goto err1; } /* * We need to use the address from the cm_id in case the * caller specified 0 for the port number. */ sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr; svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen); return &cma_xprt->sc_xprt; err1: rdma_destroy_id(listen_id); err0: kfree(cma_xprt); return ERR_PTR(ret); } static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt) { struct ib_mr *mr; struct ib_fast_reg_page_list *pl; struct svc_rdma_fastreg_mr *frmr; frmr = kmalloc(sizeof(*frmr), GFP_KERNEL); if (!frmr) goto err; mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES); if (IS_ERR(mr)) goto err_free_frmr; pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device, RPCSVC_MAXPAGES); if (IS_ERR(pl)) goto err_free_mr; frmr->mr = mr; frmr->page_list = pl; INIT_LIST_HEAD(&frmr->frmr_list); return frmr; err_free_mr: ib_dereg_mr(mr); err_free_frmr: kfree(frmr); err: return ERR_PTR(-ENOMEM); } static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt) { struct svc_rdma_fastreg_mr *frmr; while (!list_empty(&xprt->sc_frmr_q)) { frmr = list_entry(xprt->sc_frmr_q.next, struct svc_rdma_fastreg_mr, frmr_list); list_del_init(&frmr->frmr_list); ib_dereg_mr(frmr->mr); ib_free_fast_reg_page_list(frmr->page_list); kfree(frmr); } } struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma) { struct svc_rdma_fastreg_mr *frmr = NULL; spin_lock_bh(&rdma->sc_frmr_q_lock); if (!list_empty(&rdma->sc_frmr_q)) { frmr = list_entry(rdma->sc_frmr_q.next, struct svc_rdma_fastreg_mr, frmr_list); list_del_init(&frmr->frmr_list); frmr->map_len = 0; frmr->page_list_len = 0; } spin_unlock_bh(&rdma->sc_frmr_q_lock); if (frmr) return frmr; return rdma_alloc_frmr(rdma); } static void frmr_unmap_dma(struct svcxprt_rdma *xprt, struct svc_rdma_fastreg_mr *frmr) { int page_no; for (page_no = 0; page_no < frmr->page_list_len; page_no++) { dma_addr_t addr = frmr->page_list->page_list[page_no]; if (ib_dma_mapping_error(frmr->mr->device, addr)) continue; atomic_dec(&xprt->sc_dma_used); ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE, frmr->direction); } } void svc_rdma_put_frmr(struct svcxprt_rdma *rdma, struct svc_rdma_fastreg_mr *frmr) { if (frmr) { frmr_unmap_dma(rdma, frmr); spin_lock_bh(&rdma->sc_frmr_q_lock); BUG_ON(!list_empty(&frmr->frmr_list)); list_add(&frmr->frmr_list, &rdma->sc_frmr_q); spin_unlock_bh(&rdma->sc_frmr_q_lock); } } /* * This is the xpo_recvfrom function for listening endpoints. Its * purpose is to accept incoming connections. The CMA callback handler * has already created a new transport and attached it to the new CMA * ID. * * There is a queue of pending connections hung on the listening * transport. This queue contains the new svc_xprt structure. 
This * function takes svc_xprt structures off the accept_q and completes * the connection. */ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) { struct svcxprt_rdma *listen_rdma; struct svcxprt_rdma *newxprt = NULL; struct rdma_conn_param conn_param; struct ib_qp_init_attr qp_attr; struct ib_device_attr devattr; int uninitialized_var(dma_mr_acc); int need_dma_mr; int ret; int i; listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); clear_bit(XPT_CONN, &xprt->xpt_flags); /* Get the next entry off the accept list */ spin_lock_bh(&listen_rdma->sc_lock); if (!list_empty(&listen_rdma->sc_accept_q)) { newxprt = list_entry(listen_rdma->sc_accept_q.next, struct svcxprt_rdma, sc_accept_q); list_del_init(&newxprt->sc_accept_q); } if (!list_empty(&listen_rdma->sc_accept_q)) set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags); spin_unlock_bh(&listen_rdma->sc_lock); if (!newxprt) return NULL; dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n", newxprt, newxprt->sc_cm_id); ret = ib_query_device(newxprt->sc_cm_id->device, &devattr); if (ret) { dprintk("svcrdma: could not query device attributes on " "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret); goto errout; } /* Qualify the transport resource defaults with the * capabilities of this particular device */ newxprt->sc_max_sge = min((size_t)devattr.max_sge, (size_t)RPCSVC_MAXPAGES); newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr, (size_t)svcrdma_max_requests); newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests; /* * Limit ORD based on client limit, local device limit, and * configured svcrdma limit. */ newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord); newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord); newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device); if (IS_ERR(newxprt->sc_pd)) { dprintk("svcrdma: error creating PD for connect request\n"); goto errout; } newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device, sq_comp_handler, cq_event_handler, newxprt, newxprt->sc_sq_depth, 0); if (IS_ERR(newxprt->sc_sq_cq)) { dprintk("svcrdma: error creating SQ CQ for connect request\n"); goto errout; } newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device, rq_comp_handler, cq_event_handler, newxprt, newxprt->sc_max_requests, 0); if (IS_ERR(newxprt->sc_rq_cq)) { dprintk("svcrdma: error creating RQ CQ for connect request\n"); goto errout; } memset(&qp_attr, 0, sizeof qp_attr); qp_attr.event_handler = qp_event_handler; qp_attr.qp_context = &newxprt->sc_xprt; qp_attr.cap.max_send_wr = newxprt->sc_sq_depth; qp_attr.cap.max_recv_wr = newxprt->sc_max_requests; qp_attr.cap.max_send_sge = newxprt->sc_max_sge; qp_attr.cap.max_recv_sge = newxprt->sc_max_sge; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = newxprt->sc_sq_cq; qp_attr.recv_cq = newxprt->sc_rq_cq; dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n" " cm_id->device=%p, sc_pd->device=%p\n" " cap.max_send_wr = %d\n" " cap.max_recv_wr = %d\n" " cap.max_send_sge = %d\n" " cap.max_recv_sge = %d\n", newxprt->sc_cm_id, newxprt->sc_pd, newxprt->sc_cm_id->device, newxprt->sc_pd->device, qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr, qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge); ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); if (ret) { /* * XXX: This is a hack. 
We need an xx_request_qp interface * that will adjust the qp_attr's with a best-effort * number */ qp_attr.cap.max_send_sge -= 2; qp_attr.cap.max_recv_sge -= 2; ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); if (ret) { dprintk("svcrdma: failed to create QP, ret=%d\n", ret); goto errout; } newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge); newxprt->sc_sq_depth = qp_attr.cap.max_send_wr; newxprt->sc_max_requests = qp_attr.cap.max_recv_wr; } newxprt->sc_qp = newxprt->sc_cm_id->qp; /* * Use the most secure set of MR resources based on the * transport type and available memory management features in * the device. Here's the table implemented below: * * Fast Global DMA Remote WR * Reg LKEY MR Access * Sup'd Sup'd Needed Needed * * IWARP N N Y Y * N Y Y Y * Y N Y N * Y Y N - * * IB N N Y N * N Y N - * Y N Y N * Y Y N - * * NB: iWARP requires remote write access for the data sink * of an RDMA_READ. IB does not. */ if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { newxprt->sc_frmr_pg_list_len = devattr.max_fast_reg_page_list_len; newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG; } /* * Determine if a DMA MR is required and if so, what privs are required */ switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) { case RDMA_TRANSPORT_IWARP: newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV; if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) { need_dma_mr = 1; dma_mr_acc = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); } else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) { need_dma_mr = 1; dma_mr_acc = IB_ACCESS_LOCAL_WRITE; } else need_dma_mr = 0; break; case RDMA_TRANSPORT_IB: if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) { need_dma_mr = 1; dma_mr_acc = IB_ACCESS_LOCAL_WRITE; } else need_dma_mr = 0; break; default: goto errout; } /* Create the DMA MR if needed, otherwise, use the DMA LKEY */ if (need_dma_mr) { /* Register all of physical memory */ newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc); if (IS_ERR(newxprt->sc_phys_mr)) { ret = PTR_ERR(newxprt->sc_phys_mr); dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret); goto errout; } newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey; } else newxprt->sc_dma_lkey = newxprt->sc_cm_id->device->local_dma_lkey; /* Post receive buffers */ for (i = 0; i < newxprt->sc_max_requests; i++) { ret = svc_rdma_post_recv(newxprt); if (ret) { dprintk("svcrdma: failure posting receive buffers\n"); goto errout; } } /* Swap out the handler */ newxprt->sc_cm_id->event_handler = rdma_cma_handler; /* * Arm the CQs for the SQ and RQ before accepting so we can't * miss the first message */ ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP); ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP); /* Accept Connection */ set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags); memset(&conn_param, 0, sizeof conn_param); conn_param.responder_resources = 0; conn_param.initiator_depth = newxprt->sc_ord; ret = rdma_accept(newxprt->sc_cm_id, &conn_param); if (ret) { dprintk("svcrdma: failed to accept new connection, ret=%d\n", ret); goto errout; } dprintk("svcrdma: new connection %p accepted with the following " "attributes:\n" " local_ip : %pI4\n" " local_port : %d\n" " remote_ip : %pI4\n" " remote_port : %d\n" " max_sge : %d\n" " sq_depth : %d\n" " max_requests : %d\n" " ord : %d\n", newxprt, &((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.src_addr)->sin_addr.s_addr, ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.src_addr)->sin_port), &((struct sockaddr_in
*)&newxprt->sc_cm_id-> route.addr.dst_addr)->sin_addr.s_addr, ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id-> route.addr.dst_addr)->sin_port), newxprt->sc_max_sge, newxprt->sc_sq_depth, newxprt->sc_max_requests, newxprt->sc_ord); return &newxprt->sc_xprt; errout: dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret); /* Take a reference in case the DTO handler runs */ svc_xprt_get(&newxprt->sc_xprt); if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) ib_destroy_qp(newxprt->sc_qp); rdma_destroy_id(newxprt->sc_cm_id); /* This call to put will destroy the transport */ svc_xprt_put(&newxprt->sc_xprt); return NULL; } static void svc_rdma_release_rqst(struct svc_rqst *rqstp) { } /* * When connected, an svc_xprt has at least two references: * * - A reference held by the cm_id between the ESTABLISHED and * DISCONNECTED events. If the remote peer disconnected first, this * reference could be gone. * * - A reference held by the svc_recv code that called this function * as part of close processing. * * At a minimum, one reference should still be held. */ static void svc_rdma_detach(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); dprintk("svc: svc_rdma_detach(%p)\n", xprt); /* Disconnect and flush posted WQE */ rdma_disconnect(rdma->sc_cm_id); } static void __svc_rdma_free(struct work_struct *work) { struct svcxprt_rdma *rdma = container_of(work, struct svcxprt_rdma, sc_work); dprintk("svcrdma: svc_rdma_free(%p)\n", rdma); /* We should only be called from kref_put */ BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0); /* * Destroy queued, but not processed read completions. Note * that this cleanup has to be done before destroying the * cm_id because the device ptr is needed to unmap the dma in * svc_rdma_put_context. */ while (!list_empty(&rdma->sc_read_complete_q)) { struct svc_rdma_op_ctxt *ctxt; ctxt = list_entry(rdma->sc_read_complete_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); svc_rdma_put_context(ctxt, 1); } /* Destroy queued, but not processed recv completions */ while (!list_empty(&rdma->sc_rq_dto_q)) { struct svc_rdma_op_ctxt *ctxt; ctxt = list_entry(rdma->sc_rq_dto_q.next, struct svc_rdma_op_ctxt, dto_q); list_del_init(&ctxt->dto_q); svc_rdma_put_context(ctxt, 1); } /* Warn if we leaked a resource or under-referenced */ WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0); WARN_ON(atomic_read(&rdma->sc_dma_used) != 0); /* De-allocate fastreg mr */ rdma_dealloc_frmr_q(rdma); /* Destroy the QP if present (not a listener) */ if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) ib_destroy_qp(rdma->sc_qp); if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq)) ib_destroy_cq(rdma->sc_sq_cq); if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq)) ib_destroy_cq(rdma->sc_rq_cq); if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr)) ib_dereg_mr(rdma->sc_phys_mr); if (rdma->sc_pd && !IS_ERR(rdma->sc_pd)) ib_dealloc_pd(rdma->sc_pd); /* Destroy the CM ID */ rdma_destroy_id(rdma->sc_cm_id); kfree(rdma); } static void svc_rdma_free(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); INIT_WORK(&rdma->sc_work, __svc_rdma_free); schedule_work(&rdma->sc_work); } static int svc_rdma_has_wspace(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); /* * If there are fewer SQ WR available than required to send a * simple response, return false.
*/ if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)) return 0; /* * ...or there are already waiters on the SQ, * return false. */ if (waitqueue_active(&rdma->sc_send_wait)) return 0; /* Otherwise return true. */ return 1; } /* * Attempt to register the kvec representing the RPC memory with the * device. * * Returns: * 0 : The FASTREG WR for the kvec was successfully posted. * <0 : An error was encountered attempting to post the WR. */ int svc_rdma_fastreg(struct svcxprt_rdma *xprt, struct svc_rdma_fastreg_mr *frmr) { struct ib_send_wr fastreg_wr; u8 key; /* Bump the key */ key = (u8)(frmr->mr->lkey & 0x000000FF); ib_update_fast_reg_key(frmr->mr, ++key); /* Prepare FASTREG WR */ memset(&fastreg_wr, 0, sizeof fastreg_wr); fastreg_wr.opcode = IB_WR_FAST_REG_MR; fastreg_wr.send_flags = IB_SEND_SIGNALED; fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva; fastreg_wr.wr.fast_reg.page_list = frmr->page_list; fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len; fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT; fastreg_wr.wr.fast_reg.length = frmr->map_len; fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags; fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey; return svc_rdma_send(xprt, &fastreg_wr); } int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) { struct ib_send_wr *bad_wr, *n_wr; int wr_count; int i; int ret; if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) return -ENOTCONN; BUG_ON(wr->send_flags != IB_SEND_SIGNALED); wr_count = 1; for (n_wr = wr->next; n_wr; n_wr = n_wr->next) wr_count++; /* If the SQ is full, wait until an SQ entry is available */ while (1) { spin_lock_bh(&xprt->sc_lock); if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { spin_unlock_bh(&xprt->sc_lock); atomic_inc(&rdma_stat_sq_starve); /* See if we can opportunistically reap SQ WR to make room */ sq_cq_reap(xprt); /* Wait until SQ WR available if SQ still full */ wait_event(xprt->sc_send_wait, atomic_read(&xprt->sc_sq_count) < xprt->sc_sq_depth); if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags)) return 0; continue; } /* Take a transport ref for each WR posted */ for (i = 0; i < wr_count; i++) svc_xprt_get(&xprt->sc_xprt); /* Bump used SQ WR count and post */ atomic_add(wr_count, &xprt->sc_sq_count); ret = ib_post_send(xprt->sc_qp, wr, &bad_wr); if (ret) { set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); atomic_sub(wr_count, &xprt->sc_sq_count); for (i = 0; i < wr_count; i++) svc_xprt_put(&xprt->sc_xprt); dprintk("svcrdma: failed to post SQ WR rc=%d, " "sc_sq_count=%d, sc_sq_depth=%d\n", ret, atomic_read(&xprt->sc_sq_count), xprt->sc_sq_depth); } spin_unlock_bh(&xprt->sc_lock); if (ret) wake_up(&xprt->sc_send_wait); break; } return ret; } void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, enum rpcrdma_errcode err) { struct ib_send_wr err_wr; struct ib_sge sge; struct page *p; struct svc_rdma_op_ctxt *ctxt; u32 *va; int length; int ret; p = svc_rdma_get_page(); va = page_address(p); /* XDR encode error */ length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va); /* Prepare SGE for local address; the page is sent to the device */ sge.addr = ib_dma_map_single(xprt->sc_cm_id->device, page_address(p), PAGE_SIZE, DMA_TO_DEVICE); if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) { put_page(p); return; } atomic_inc(&xprt->sc_dma_used); sge.lkey = xprt->sc_dma_lkey; sge.length = length; ctxt = svc_rdma_get_context(xprt); ctxt->count = 1; ctxt->pages[0] = p; /* Prepare SEND WR */ memset(&err_wr, 0, sizeof err_wr); ctxt->wr_op = IB_WR_SEND; err_wr.wr_id = (unsigned long)ctxt; err_wr.sg_list = &sge; err_wr.num_sge = 1; err_wr.opcode = IB_WR_SEND; err_wr.send_flags = IB_SEND_SIGNALED; /* Post It */ ret = svc_rdma_send(xprt, &err_wr); if (ret) { dprintk("svcrdma: Error %d posting send for protocol error\n", ret); ib_dma_unmap_single(xprt->sc_cm_id->device, sge.addr, PAGE_SIZE, DMA_TO_DEVICE); svc_rdma_put_context(ctxt, 1); } }
gpl-2.0
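The svc_rdma_get_frmr()/svc_rdma_put_frmr() pair above is a classic free-list cache: put recycles an object onto a locked queue instead of freeing it, and get pops from that queue before falling back to a fresh allocation. Below is a minimal single-threaded sketch of the same pattern in plain C; struct res, res_get(), res_put() and the main() driver are illustrative inventions, not kernel names, and the real kernel queue is protected by sc_frmr_q_lock because get and put can run concurrently.

#include <stdio.h>
#include <stdlib.h>

/* Toy resource standing in for struct svc_rdma_fastreg_mr. */
struct res {
	struct res *next;
	int id;
};

static struct res *free_list;	/* stand-in for xprt->sc_frmr_q */
static int next_id;

/* Mirrors svc_rdma_get_frmr(): reuse a cached entry if one exists,
 * otherwise fall back to allocating a new one. */
static struct res *res_get(void)
{
	struct res *r = free_list;

	if (r) {
		free_list = r->next;
		return r;
	}
	r = malloc(sizeof(*r));
	if (r)
		r->id = next_id++;
	return r;
}

/* Mirrors svc_rdma_put_frmr(): recycle onto the free list instead of
 * freeing, so the next get is cheap. */
static void res_put(struct res *r)
{
	if (!r)
		return;
	r->next = free_list;
	free_list = r;
}

int main(void)
{
	struct res *a = res_get();
	struct res *b = res_get();
	struct res *c;

	printf("a=%d b=%d\n", a->id, b->id);	/* 0 1 */
	res_put(a);
	c = res_get();
	printf("reused=%d\n", c->id);		/* 0 again: a was recycled */
	res_put(b);
	res_put(c);
	return 0;
}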
01org/XenGT-Preview-kernel
arch/mips/loongson/common/serial.c
520
2317
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) * * Copyright (C) 2009 Lemote, Inc. * Author: Yan hua (yanhua@lemote.com) * Author: Wu Zhangjin (wuzhangjin@gmail.com) */ #include <linux/io.h> #include <linux/init.h> #include <linux/serial_8250.h> #include <asm/bootinfo.h> #include <loongson.h> #include <machine.h> #define PORT(int, clk) \ { \ .irq = int, \ .uartclk = clk, \ .iotype = UPIO_PORT, \ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \ .regshift = 0, \ } #define PORT_M(int, clk) \ { \ .irq = MIPS_CPU_IRQ_BASE + (int), \ .uartclk = clk, \ .iotype = UPIO_MEM, \ .membase = (void __iomem *)NULL, \ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \ .regshift = 0, \ } static struct plat_serial8250_port uart8250_data[][2] = { [MACH_LOONGSON_UNKNOWN] {}, [MACH_LEMOTE_FL2E] {PORT(4, 1843200), {} }, [MACH_LEMOTE_FL2F] {PORT(3, 1843200), {} }, [MACH_LEMOTE_ML2F7] {PORT_M(3, 3686400), {} }, [MACH_LEMOTE_YL2F89] {PORT_M(3, 3686400), {} }, [MACH_DEXXON_GDIUM2F10] {PORT_M(3, 3686400), {} }, [MACH_LEMOTE_NAS] {PORT_M(3, 3686400), {} }, [MACH_LEMOTE_LL2F] {PORT(3, 1843200), {} }, [MACH_LEMOTE_A1004] {PORT_M(2, 33177600), {} }, [MACH_LEMOTE_A1101] {PORT_M(2, 25000000), {} }, [MACH_LEMOTE_A1201] {PORT_M(2, 25000000), {} }, [MACH_LEMOTE_A1205] {PORT_M(2, 25000000), {} }, [MACH_LOONGSON_END] {}, }; static struct platform_device uart8250_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, }; static int __init serial_init(void) { unsigned char iotype; iotype = uart8250_data[mips_machtype][0].iotype; if (UPIO_MEM == iotype) uart8250_data[mips_machtype][0].membase = (void __iomem *)_loongson_uart_base; else if (UPIO_PORT == iotype) uart8250_data[mips_machtype][0].iobase = loongson_uart_base - LOONGSON_PCIIO_BASE; uart8250_device.dev.platform_data = uart8250_data[mips_machtype]; return platform_device_register(&uart8250_device); } device_initcall(serial_init);
gpl-2.0
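The uart8250_data[] table above relies on C designated initializers so that a single runtime index (mips_machtype) selects the right per-board UART parameters. A self-contained sketch of that table-indexing pattern follows; the machine names and values are made up and merely stand in for the Lemote entries.

#include <stdio.h>

enum mach { MACH_UNKNOWN, MACH_A, MACH_B, MACH_END };

struct port {
	int irq;
	unsigned int clk;
};

/* Same designated-initializer pattern as uart8250_data[]: one row per
 * machine type, selected at runtime by a single index. */
static const struct port uart_data[MACH_END] = {
	[MACH_A] = { .irq = 4, .clk = 1843200 },
	[MACH_B] = { .irq = 3, .clk = 3686400 },
};

int main(void)
{
	enum mach machtype = MACH_B;	/* the kernel reads mips_machtype */
	const struct port *p = &uart_data[machtype];

	printf("irq=%d uartclk=%u\n", p->irq, p->clk);
	return 0;
}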
markolino631/lge_kernel_msm7x27
arch/arm/mach-shmobile/intc-sh7367.c
776
10828
/* * sh7367 processor support - INTC hardware block * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/sh_intc.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> enum { UNUSED_INTCA = 0, ENABLED, DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A, DIRC, CRYPT1_ERR, CRYPT2_STD, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1, ARM11_IRQPMU, ARM11_COMMTX, ARM11_COMMRX, ETM11_ACQCMP, ETM11_FULL, MFI_MFIM, MFI_MFIS, BBIF1, BBIF2, USBDMAC_USHDMI, USBHS_USHI0, USBHS_USHI1, CMT1_CMT10, CMT1_CMT11, CMT1_CMT12, CMT1_CMT13, CMT2, CMT3, KEYSC_KEY, SCIFA0, SCIFA1, SCIFA2, SCIFA3, MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, SDHI0, SDHI1, MSU_MSU, MSU_MSU2, IREM, SIU, SPU, IRDA, TPU0, TPU1, TPU2, TPU3, TPU4, LCRC, PINT1, PINT2, TTI20, MISTY, DDM, SDHI2, RWDT0, RWDT1, DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3, DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR, DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3, DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR, DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR, /* interrupt groups INTCA */ DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, ETM11, ARM11, USBHS, FLCTL, IIC1 }; static struct intc_vect intca_vectors[] = { INTC_VECT(IRQ0A, 0x0200), INTC_VECT(IRQ1A, 0x0220), INTC_VECT(IRQ2A, 0x0240), INTC_VECT(IRQ3A, 0x0260), INTC_VECT(IRQ4A, 0x0280), INTC_VECT(IRQ5A, 0x02a0), INTC_VECT(IRQ6A, 0x02c0), INTC_VECT(IRQ7A, 0x02e0), INTC_VECT(IRQ8A, 0x0300), INTC_VECT(IRQ9A, 0x0320), INTC_VECT(IRQ10A, 0x0340), INTC_VECT(IRQ11A, 0x0360), INTC_VECT(IRQ12A, 0x0380), INTC_VECT(IRQ13A, 0x03a0), INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0), INTC_VECT(DIRC, 0x0560), INTC_VECT(CRYPT1_ERR, 0x05e0), INTC_VECT(CRYPT2_STD, 0x0700), INTC_VECT(IIC1_ALI1, 0x0780), INTC_VECT(IIC1_TACKI1, 0x07a0), INTC_VECT(IIC1_WAITI1, 0x07c0), INTC_VECT(IIC1_DTEI1, 0x07e0), INTC_VECT(ARM11_IRQPMU, 0x0800), INTC_VECT(ARM11_COMMTX, 0x0840), INTC_VECT(ARM11_COMMRX, 0x0860), INTC_VECT(ETM11_ACQCMP, 0x0880), INTC_VECT(ETM11_FULL, 0x08a0), INTC_VECT(MFI_MFIM, 0x0900), INTC_VECT(MFI_MFIS, 0x0920), INTC_VECT(BBIF1, 0x0940), INTC_VECT(BBIF2, 0x0960), INTC_VECT(USBDMAC_USHDMI, 0x0a00), INTC_VECT(USBHS_USHI0, 0x0a20), INTC_VECT(USBHS_USHI1, 0x0a40), INTC_VECT(CMT1_CMT10, 0x0b00), INTC_VECT(CMT1_CMT11, 0x0b20), INTC_VECT(CMT1_CMT12, 0x0b40), INTC_VECT(CMT1_CMT13, 0x0b60), INTC_VECT(CMT2, 0x0b80), INTC_VECT(CMT3, 0x0ba0), INTC_VECT(KEYSC_KEY, 0x0be0), INTC_VECT(SCIFA0, 0x0c00), INTC_VECT(SCIFA1, 0x0c20), INTC_VECT(SCIFA2, 0x0c40), INTC_VECT(SCIFA3, 0x0c60), INTC_VECT(MSIOF2, 0x0c80), INTC_VECT(MSIOF1, 0x0d00), INTC_VECT(SCIFA4, 
0x0d20), INTC_VECT(SCIFA5, 0x0d40), INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0), INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), INTC_VECT(IREM, 0x0f60), INTC_VECT(SIU, 0x0fa0), INTC_VECT(SPU, 0x0fc0), INTC_VECT(IRDA, 0x0480), INTC_VECT(TPU0, 0x04a0), INTC_VECT(TPU1, 0x04c0), INTC_VECT(TPU2, 0x04e0), INTC_VECT(TPU3, 0x0500), INTC_VECT(TPU4, 0x0520), INTC_VECT(LCRC, 0x0540), INTC_VECT(PINT1, 0x1000), INTC_VECT(PINT2, 0x1020), INTC_VECT(TTI20, 0x1100), INTC_VECT(MISTY, 0x1120), INTC_VECT(DDM, 0x1140), INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0), INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 0x2020), INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060), INTC_VECT(DMAC_2_DEI4, 0x2080), INTC_VECT(DMAC_2_DEI5, 0x20a0), INTC_VECT(DMAC_2_DADERR, 0x20c0), INTC_VECT(DMAC2_1_DEI0, 0x2100), INTC_VECT(DMAC2_1_DEI1, 0x2120), INTC_VECT(DMAC2_1_DEI2, 0x2140), INTC_VECT(DMAC2_1_DEI3, 0x2160), INTC_VECT(DMAC2_2_DEI4, 0x2180), INTC_VECT(DMAC2_2_DEI5, 0x21a0), INTC_VECT(DMAC2_2_DADERR, 0x21c0), INTC_VECT(DMAC3_1_DEI0, 0x2200), INTC_VECT(DMAC3_1_DEI1, 0x2220), INTC_VECT(DMAC3_1_DEI2, 0x2240), INTC_VECT(DMAC3_1_DEI3, 0x2260), INTC_VECT(DMAC3_2_DEI4, 0x2280), INTC_VECT(DMAC3_2_DEI5, 0x22a0), INTC_VECT(DMAC3_2_DADERR, 0x22c0), }; static struct intc_group intca_groups[] __initdata = { INTC_GROUP(DMAC_1, DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3), INTC_GROUP(DMAC_2, DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR), INTC_GROUP(DMAC2_1, DMAC2_1_DEI0, DMAC2_1_DEI1, DMAC2_1_DEI2, DMAC2_1_DEI3), INTC_GROUP(DMAC2_2, DMAC2_2_DEI4, DMAC2_2_DEI5, DMAC2_2_DADERR), INTC_GROUP(DMAC3_1, DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3), INTC_GROUP(DMAC3_2, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR), INTC_GROUP(ETM11, ETM11_ACQCMP, ETM11_FULL), INTC_GROUP(ARM11, ARM11_IRQPMU, ARM11_COMMTX, ARM11_COMMRX), INTC_GROUP(USBHS, USBHS_USHI0, USBHS_USHI1), INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), }; static struct intc_mask_reg intca_mask_registers[] = { { 0xe6900040, 0xe6900060, 8, /* INTMSK00A / INTMSKCLR00A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900044, 0xe6900064, 8, /* INTMSK10A / INTMSKCLR10A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, { 0xe6940080, 0xe69400c0, 8, /* IMR0A / IMCR0A */ { DMAC2_1_DEI3, DMAC2_1_DEI2, DMAC2_1_DEI1, DMAC2_1_DEI0, ARM11_IRQPMU, 0, ARM11_COMMTX, ARM11_COMMRX } }, { 0xe6940084, 0xe69400c4, 8, /* IMR1A / IMCR1A */ { CRYPT1_ERR, CRYPT2_STD, DIRC, 0, DMAC_1_DEI3, DMAC_1_DEI2, DMAC_1_DEI1, DMAC_1_DEI0 } }, { 0xe6940088, 0xe69400c8, 8, /* IMR2A / IMCR2A */ { PINT1, PINT2, 0, 0, BBIF1, BBIF2, MFI_MFIS, MFI_MFIM } }, { 0xe694008c, 0xe69400cc, 8, /* IMR3A / IMCR3A */ { DMAC3_1_DEI3, DMAC3_1_DEI2, DMAC3_1_DEI1, DMAC3_1_DEI0, DMAC3_2_DADERR, DMAC3_2_DEI5, DMAC3_2_DEI4, IRDA } }, { 0xe6940090, 0xe69400d0, 8, /* IMR4A / IMCR4A */ { DDM, 0, 0, 0, 0, 0, ETM11_FULL, ETM11_ACQCMP } }, { 0xe6940094, 0xe69400d4, 8, /* IMR5A / IMCR5A */ { KEYSC_KEY, DMAC_2_DADERR, DMAC_2_DEI5, DMAC_2_DEI4, SCIFA3,
SCIFA2, SCIFA1, SCIFA0 } }, { 0xe6940098, 0xe69400d8, 8, /* IMR6A / IMCR6A */ { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ { DISABLED, DISABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, SPU, SIU } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, CMT2, USBHS_USHI1, USBHS_USHI0, 0 } }, { 0xe69400a8, 0xe69400e8, 8, /* IMR10A / IMCR10A */ { 0, DMAC2_2_DADERR, DMAC2_2_DEI5, DMAC2_2_DEI4, 0, 0, 0, 0 } }, { 0xe69400ac, 0xe69400ec, 8, /* IMR11A / IMCR11A */ { IIC1_DTEI1, IIC1_WAITI1, IIC1_TACKI1, IIC1_ALI1, LCRC, MSU_MSU2, IREM, MSU_MSU } }, { 0xe69400b0, 0xe69400f0, 8, /* IMR12A / IMCR12A */ { 0, 0, TPU0, TPU1, TPU2, TPU3, TPU4, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ { DISABLED, DISABLED, ENABLED, ENABLED, MISTY, CMT3, RWDT1, RWDT0 } }, }; static struct intc_prio_reg intca_prio_registers[] = { { 0xe6900010, 0, 32, 4, /* INTPRI00A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900014, 0, 32, 4, /* INTPRI10A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, { 0xe6940000, 0, 16, 4, /* IPRAA */ { DMAC3_1, DMAC3_2, CMT2, LCRC } }, { 0xe6940004, 0, 16, 4, /* IPRBA */ { IRDA, ETM11, BBIF1, BBIF2 } }, { 0xe6940008, 0, 16, 4, /* IPRCA */ { CRYPT1_ERR, CRYPT2_STD, CMT1_CMT11, ARM11 } }, { 0xe694000c, 0, 16, 4, /* IPRDA */ { PINT1, PINT2, CMT1_CMT12, TPU4 } }, { 0xe6940010, 0, 16, 4, /* IPREA */ { DMAC_1, MFI_MFIS, MFI_MFIM, USBHS } }, { 0xe6940014, 0, 16, 4, /* IPRFA */ { KEYSC_KEY, DMAC_2, 0, CMT1_CMT10 } }, { 0xe6940018, 0, 16, 4, /* IPRGA */ { SCIFA0, SCIFA1, SCIFA2, SCIFA3 } }, { 0xe694001c, 0, 16, 4, /* IPRGH */ { MSIOF2, USBDMAC_USHDMI, FLCTL, SDHI0 } }, { 0xe6940020, 0, 16, 4, /* IPRIA */ { MSIOF1, SCIFA4, MSU_MSU, IIC1 } }, { 0xe6940024, 0, 16, 4, /* IPRJA */ { DMAC2_1, DMAC2_2, SIU, TTI20 } }, { 0xe6940028, 0, 16, 4, /* IPRKA */ { 0, CMT1_CMT13, IREM, SDHI1 } }, { 0xe694002c, 0, 16, 4, /* IPRLA */ { TPU0, TPU1, TPU2, TPU3 } }, { 0xe6940030, 0, 16, 4, /* IPRMA */ { MISTY, CMT3, RWDT1, RWDT0 } }, { 0xe6940034, 0, 16, 4, /* IPRNA */ { SCIFB, SCIFA5, SPU, DDM } }, { 0xe6940038, 0, 16, 4, /* IPROA */ { 0, 0, DIRC, SDHI2 } }, }; static struct intc_sense_reg intca_sense_registers[] __initdata = { { 0xe6900000, 16, 2, /* ICR1A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900004, 16, 2, /* ICR2A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, }; static struct intc_mask_reg intca_ack_registers[] __initdata = { { 0xe6900020, 0, 8, /* INTREQ00A */ { IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A } }, { 0xe6900024, 0, 8, /* INTREQ10A */ { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, }; static struct intc_desc intca_desc __initdata = { .name = "sh7367-intca", .force_enable = ENABLED, .force_disable = DISABLED, .hw = INTC_HW_DESC(intca_vectors, intca_groups, intca_mask_registers, intca_prio_registers, intca_sense_registers, intca_ack_registers), }; void __init sh7367_init_irq(void) { /* INTCA */ register_intc_controller(&intca_desc); }
gpl-2.0
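Each INTC_VECT() entry above pairs an interrupt source with a vector offset, and the offsets are spaced 0x20 apart. On sh-mobile parts the conventional conversion between vector address and Linux IRQ number is the evt2irq() macro; the sketch below assumes the common definition (((evt) >> 5) - 16) from the mach/irqs.h headers, which is an assumption about this particular SoC rather than something stated in the file itself.

#include <assert.h>
#include <stdio.h>

/* Assumed evt2irq()/irq2evt() pair: vectors are 0x20 bytes apart and
 * vector 0x200 maps to IRQ 0, so divide by 32 and rebase by 16. */
static unsigned int evt2irq(unsigned int evt) { return (evt >> 5) - 16; }
static unsigned int irq2evt(unsigned int irq) { return (irq + 16) << 5; }

int main(void)
{
	/* IRQ0A has vector 0x0200, the first external IRQ. */
	printf("IRQ0A  -> irq %u\n", evt2irq(0x0200));	/* 0 */
	printf("SCIFA0 -> irq %u\n", evt2irq(0x0c00));	/* 80 */

	/* The mapping round-trips: CMT2's vector 0x0b80 survives. */
	assert(irq2evt(evt2irq(0x0b80)) == 0x0b80);
	return 0;
}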
myrual/openwrt
package/ead/src/tinysrp/t_conf.c
776
24343
/* * Copyright (c) 1997-1999 The Stanford SRP Authentication Project * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL STANFORD BE LIABLE FOR ANY SPECIAL, INCIDENTAL, * INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF * THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * In addition, the following conditions apply: * * 1. Any software that incorporates the SRP authentication technology * must display the following acknowledgment: * "This product uses the 'Secure Remote Password' cryptographic * authentication system developed by Tom Wu (tjw@CS.Stanford.EDU)." * * 2. Any software that incorporates all or part of the SRP distribution * itself must also display the following acknowledgment: * "This product includes software developed by Tom Wu and Eugene * Jhong for the SRP Distribution (http://srp.stanford.edu/srp/)." * * 3. Redistributions in source or binary form must retain an intact copy * of this copyright notice and list of conditions. */ #include <stdio.h> #include "t_defines.h" #include "t_pwd.h" #include "t_read.h" #include "bn.h" #include "bn_lcl.h" #include "bn_prime.h" #define TABLE_SIZE 32 static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1, const BIGNUM *a1_odd, int k, BN_CTX *ctx, BN_MONT_CTX *mont); /* * This is the safe prime generation logic. * To generate a safe prime p (where p = 2q+1 and q is prime), we start * with a random odd q that is one bit shorter than the desired length * of p. We use a simple 30-element sieve to filter the values of q * and consider only those that are 11, 23, or 29 (mod 30). (If q were * anything else, either q or p would be divisible by 2, 3, or 5). * For the values of q that are left, we apply the following tests in * this order: * * trial divide q * let p = 2q + 1 * trial divide p * apply Fermat test to q (2^q == 2 (mod q)) * apply Fermat test to p (2^p == 2 (mod p)) * apply real probabilistic primality test to q * apply real probabilistic primality test to p * * A number that passes all these tests is considered a safe prime for * our purposes. The tests are ordered this way for efficiency; the * slower tests are run rarely if ever at all.
*/ static int trialdiv(x) const BigInteger x; { static int primes[] = { /* All odd primes < 256 */ 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251 }; static int nprimes = sizeof(primes) / sizeof(int); int i; for(i = 0; i < nprimes; ++i) { if(BigIntegerModInt(x, primes[i]) == 0) return primes[i]; } return 1; } /* x + sieve30[x%30] == 11, 23, or 29 (mod 30) */ static int sieve30[] = { 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 6, 5, 4, 3, 2, 1, 12 }; /* Find a Sophie-Germain prime between "lo" and "hi". NOTE: this is not a "safe prime", but the smaller prime. Take 2q+1 to get the safe prime. */ static void sophie_germain(q, lo, hi) BigInteger q; /* assumed initialized */ const BigInteger lo; const BigInteger hi; { BigInteger m, p, r; char parambuf[MAXPARAMLEN]; int foundprime = 0; int i, mod30; m = BigIntegerFromInt(0); BigIntegerSub(m, hi, lo); i = (BigIntegerBitLen(m) + 7) / 8; t_random(parambuf, i); r = BigIntegerFromBytes(parambuf, i); BigIntegerMod(r, r, m); BigIntegerAdd(q, r, lo); if(BigIntegerModInt(q, 2) == 0) BigIntegerAddInt(q, q, 1); /* make q odd */ mod30 = BigIntegerModInt(q, 30); /* mod30 = q % 30 */ BigIntegerFree(m); m = BigIntegerFromInt(2); /* m = 2 */ p = BigIntegerFromInt(0); while(BigIntegerCmp(q, hi) < 0) { if(trialdiv(q) < 2) { BigIntegerMulInt(p, q, 2); /* p = 2 * q */ BigIntegerAddInt(p, p, 1); /* p += 1 */ if(trialdiv(p) < 2) { BigIntegerModExp(r, m, q, q); /* r = 2^q % q */ if(BigIntegerCmpInt(r, 2) == 0) { /* if(r == 2) */ BigIntegerModExp(r, m, p, p); /* r = 2^p % p */ if(BigIntegerCmpInt(r, 2) == 0) { /* if(r == 2) */ if(BigIntegerCheckPrime(q) && BigIntegerCheckPrime(p)) { ++foundprime; break; } } } } } i = sieve30[mod30]; BigIntegerAddInt(q, q, i); /* q += i */ mod30 = (mod30 + i) % 30; } /* should wrap around on failure */ if(!foundprime) { fprintf(stderr, "Prime generation failed!\n"); exit(1); } BigIntegerFree(r); BigIntegerFree(m); BigIntegerFree(p); } _TYPE( struct t_confent * ) t_makeconfent(tc, nsize) struct t_conf * tc; int nsize; { BigInteger n, g, q, t, u; t = BigIntegerFromInt(0); u = BigIntegerFromInt(1); /* u = 1 */ BigIntegerLShift(t, u, nsize - 2); /* t = 2^(nsize-2) */ BigIntegerMulInt(u, t, 2); /* u = 2^(nsize-1) */ q = BigIntegerFromInt(0); sophie_germain(q, t, u); n = BigIntegerFromInt(0); BigIntegerMulInt(n, q, 2); BigIntegerAddInt(n, n, 1); /* Look for a generator mod n */ g = BigIntegerFromInt(2); while(1) { BigIntegerModExp(t, g, q, n); /* t = g^q % n */ if(BigIntegerCmpInt(t, 1) == 0) /* if(t == 1) */ BigIntegerAddInt(g, g, 1); /* ++g */ else break; } BigIntegerFree(t); BigIntegerFree(u); BigIntegerFree(q); tc->tcbuf.modulus.data = tc->modbuf; tc->tcbuf.modulus.len = BigIntegerToBytes(n, tc->tcbuf.modulus.data); BigIntegerFree(n); tc->tcbuf.generator.data = tc->genbuf; tc->tcbuf.generator.len = BigIntegerToBytes(g, tc->tcbuf.generator.data); BigIntegerFree(g); tc->tcbuf.index = 1; return &tc->tcbuf; } _TYPE( struct t_confent * ) t_makeconfent_c(tc, nsize) struct t_conf * tc; int nsize; { BigInteger g, n, p, q, j, k, t, u; int psize, qsize; psize = nsize / 2; qsize = nsize - psize; t = BigIntegerFromInt(1); /* t = 1 */ u = BigIntegerFromInt(0); BigIntegerLShift(u, t, psize - 3); /* u = t*2^(psize-3) = 2^(psize-3) */ BigIntegerMulInt(t, u, 3); /* t = 3*u = 1.5*2^(psize-2) */ BigIntegerAdd(u, u, t); /* u += t [u = 
2^(psize-1)] */ j = BigIntegerFromInt(0); sophie_germain(j, t, u); k = BigIntegerFromInt(0); if(qsize != psize) { BigIntegerFree(t); t = BigIntegerFromInt(1); /* t = 1 */ BigIntegerLShift(u, t, qsize - 3); /* u = t*2^(qsize-3) = 2^(qsize-3) */ BigIntegerMulInt(t, u, 3); /* t = 3*u = 1.5*2^(qsize-2) */ BigIntegerAdd(u, u, t); /* u += t [u = 2^(qsize-1)] */ } sophie_germain(k, t, u); p = BigIntegerFromInt(0); BigIntegerMulInt(p, j, 2); /* p = 2 * j */ BigIntegerAddInt(p, p, 1); /* p += 1 */ q = BigIntegerFromInt(0); BigIntegerMulInt(q, k, 2); /* q = 2 * k */ BigIntegerAddInt(q, q, 1); /* q += 1 */ n = BigIntegerFromInt(0); BigIntegerMul(n, p, q); /* n = p * q */ BigIntegerMul(u, j, k); /* u = j * k */ BigIntegerFree(p); BigIntegerFree(q); BigIntegerFree(j); BigIntegerFree(k); g = BigIntegerFromInt(2); /* g = 2 */ /* Look for a generator mod n */ while(1) { BigIntegerModExp(t, g, u, n); /* t = g^u % n */ if(BigIntegerCmpInt(t, 1) == 0) BigIntegerAddInt(g, g, 1); /* ++g */ else break; } BigIntegerFree(u); BigIntegerFree(t); tc->tcbuf.modulus.data = tc->modbuf; tc->tcbuf.modulus.len = BigIntegerToBytes(n, tc->tcbuf.modulus.data); BigIntegerFree(n); tc->tcbuf.generator.data = tc->genbuf; tc->tcbuf.generator.len = BigIntegerToBytes(g, tc->tcbuf.generator.data); BigIntegerFree(g); tc->tcbuf.index = 1; return &tc->tcbuf; } _TYPE( struct t_confent * ) t_newconfent(tc) struct t_conf * tc; { tc->tcbuf.index = 0; tc->tcbuf.modulus.data = tc->modbuf; tc->tcbuf.modulus.len = 0; tc->tcbuf.generator.data = tc->genbuf; tc->tcbuf.generator.len = 0; return &tc->tcbuf; } _TYPE( void ) t_putconfent(ent, fp) const struct t_confent * ent; FILE * fp; { char strbuf[MAXB64PARAMLEN]; fprintf(fp, "%d:%s:", ent->index, t_tob64(strbuf, ent->modulus.data, ent->modulus.len)); fprintf(fp, "%s\n", t_tob64(strbuf, ent->generator.data, ent->generator.len)); } int BigIntegerBitLen(b) BigInteger b; { return BN_num_bits(b); } int BigIntegerCheckPrime(n) BigInteger n; { BN_CTX * ctx = BN_CTX_new(); int rv = BN_is_prime(n, 25, NULL, ctx, NULL); BN_CTX_free(ctx); return rv; } unsigned int BigIntegerModInt(d, m) BigInteger d; unsigned int m; { return BN_mod_word(d, m); } void BigIntegerMod(result, d, m) BigInteger result, d, m; { BN_CTX * ctx = BN_CTX_new(); BN_mod(result, d, m, ctx); BN_CTX_free(ctx); } void BigIntegerMul(result, m1, m2) BigInteger result, m1, m2; { BN_CTX * ctx = BN_CTX_new(); BN_mul(result, m1, m2, ctx); BN_CTX_free(ctx); } void BigIntegerLShift(result, x, bits) BigInteger result, x; unsigned int bits; { BN_lshift(result, x, bits); } int BN_is_prime(const BIGNUM *a, int checks, void (*callback)(int,int,void *), BN_CTX *ctx_passed, void *cb_arg) { return BN_is_prime_fasttest(a, checks, callback, ctx_passed, cb_arg, 0); } int BN_is_prime_fasttest(const BIGNUM *a, int checks, void (*callback)(int,int,void *), BN_CTX *ctx_passed, void *cb_arg, int do_trial_division) { int i, j, ret = -1; int k; BN_CTX *ctx = NULL; BIGNUM *A1, *A1_odd, *check; /* taken from ctx */ BN_MONT_CTX *mont = NULL; const BIGNUM *A = NULL; if (checks == BN_prime_checks) checks = BN_prime_checks_for_size(BN_num_bits(a)); /* first look for small factors */ if (!BN_is_odd(a)) return(0); if (do_trial_division) { for (i = 1; i < NUMPRIMES; i++) if (BN_mod_word(a, primes[i]) == 0) return 0; if (callback != NULL) callback(1, -1, cb_arg); } if (ctx_passed != NULL) ctx = ctx_passed; else if ((ctx=BN_CTX_new()) == NULL) goto err; BN_CTX_start(ctx); /* A := abs(a) */ if (a->neg) { BIGNUM *t; if ((t = BN_CTX_get(ctx)) == NULL) goto err; BN_copy(t, a); 
t->neg = 0; A = t; } else A = a; A1 = BN_CTX_get(ctx); A1_odd = BN_CTX_get(ctx); check = BN_CTX_get(ctx); if (check == NULL) goto err; /* compute A1 := A - 1 */ if (!BN_copy(A1, A)) goto err; if (!BN_sub_word(A1, 1)) goto err; if (BN_is_zero(A1)) { ret = 0; goto err; } /* write A1 as A1_odd * 2^k */ k = 1; while (!BN_is_bit_set(A1, k)) k++; if (!BN_rshift(A1_odd, A1, k)) goto err; /* Montgomery setup for computations mod A */ mont = BN_MONT_CTX_new(); if (mont == NULL) goto err; if (!BN_MONT_CTX_set(mont, A, ctx)) goto err; for (i = 0; i < checks; i++) { if (!BN_pseudo_rand(check, BN_num_bits(A1), 0, 0)) goto err; if (BN_cmp(check, A1) >= 0) if (!BN_sub(check, check, A1)) goto err; if (!BN_add_word(check, 1)) goto err; /* now 1 <= check < A */ j = witness(check, A, A1, A1_odd, k, ctx, mont); if (j == -1) goto err; if (j) { ret=0; goto err; } if (callback != NULL) callback(1,i,cb_arg); } ret=1; err: if (ctx != NULL) { BN_CTX_end(ctx); if (ctx_passed == NULL) BN_CTX_free(ctx); } if (mont != NULL) BN_MONT_CTX_free(mont); return(ret); } static int witness(BIGNUM *w, const BIGNUM *a, const BIGNUM *a1, const BIGNUM *a1_odd, int k, BN_CTX *ctx, BN_MONT_CTX *mont) { if (!BN_mod_exp_mont(w, w, a1_odd, a, ctx, mont)) /* w := w^a1_odd mod a */ return -1; if (BN_is_one(w)) return 0; /* probably prime */ if (BN_cmp(w, a1) == 0) return 0; /* w == -1 (mod a), 'a' is probably prime */ while (--k) { if (!BN_mod_mul(w, w, w, a, ctx)) /* w := w^2 mod a */ return -1; if (BN_is_one(w)) return 1; /* 'a' is composite, otherwise a previous 'w' would * have been == -1 (mod 'a') */ if (BN_cmp(w, a1) == 0) return 0; /* w == -1 (mod a), 'a' is probably prime */ } /* If we get here, 'w' is the (a-1)/2-th power of the original 'w', * and it is neither -1 nor +1 -- so 'a' cannot be prime */ return 1; } int BN_mod_exp_mont(BIGNUM *rr, BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont) { int i,j,bits,ret=0,wstart,wend,window,wvalue; int start=1,ts=0; BIGNUM *d,*r; BIGNUM *aa; BIGNUM val[TABLE_SIZE]; BN_MONT_CTX *mont=NULL; bn_check_top(a); bn_check_top(p); bn_check_top(m); if (!(m->d[0] & 1)) { return(0); } bits=BN_num_bits(p); if (bits == 0) { BN_one(rr); return(1); } BN_CTX_start(ctx); d = BN_CTX_get(ctx); r = BN_CTX_get(ctx); if (d == NULL || r == NULL) goto err; /* If this is not done, things will break in the montgomery * part */ if (in_mont != NULL) mont=in_mont; else { if ((mont=BN_MONT_CTX_new()) == NULL) goto err; if (!BN_MONT_CTX_set(mont,m,ctx)) goto err; } BN_init(&val[0]); ts=1; if (BN_ucmp(a,m) >= 0) { if (!BN_mod(&(val[0]),a,m,ctx)) goto err; aa= &(val[0]); } else aa=a; if (!BN_to_montgomery(&(val[0]),aa,mont,ctx)) goto err; /* 1 */ window = BN_window_bits_for_exponent_size(bits); if (window > 1) { if (!BN_mod_mul_montgomery(d,&(val[0]),&(val[0]),mont,ctx)) goto err; /* 2 */ j=1<<(window-1); for (i=1; i<j; i++) { BN_init(&(val[i])); if (!BN_mod_mul_montgomery(&(val[i]),&(val[i-1]),d,mont,ctx)) goto err; } ts=i; } start=1; /* This is used to avoid multiplication etc * when there is only the value '1' in the * buffer. */ wvalue=0; /* The 'value' of the window */ wstart=bits-1; /* The top bit of the window */ wend=0; /* The bottom bit of the window */ if (!BN_to_montgomery(r,BN_value_one(),mont,ctx)) goto err; for (;;) { if (BN_is_bit_set(p,wstart) == 0) { if (!start) { if (!BN_mod_mul_montgomery(r,r,r,mont,ctx)) goto err; } if (wstart == 0) break; wstart--; continue; } /* We now have wstart on a 'set' bit, we now need to work out * how bit a window to do. 
To do this we need to scan * forward until the last set bit before the end of the * window */ j=wstart; wvalue=1; wend=0; for (i=1; i<window; i++) { if (wstart-i < 0) break; if (BN_is_bit_set(p,wstart-i)) { wvalue<<=(i-wend); wvalue|=1; wend=i; } } /* wend is the size of the current window */ j=wend+1; /* add the 'bytes above' */ if (!start) for (i=0; i<j; i++) { if (!BN_mod_mul_montgomery(r,r,r,mont,ctx)) goto err; } /* wvalue will be an odd number < 2^window */ if (!BN_mod_mul_montgomery(r,r,&(val[wvalue>>1]),mont,ctx)) goto err; /* move the 'window' down further */ wstart-=wend+1; wvalue=0; start=0; if (wstart < 0) break; } if (!BN_from_montgomery(rr,r,mont,ctx)) goto err; ret=1; err: if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont); BN_CTX_end(ctx); for (i=0; i<ts; i++) BN_clear_free(&(val[i])); return(ret); } BN_ULONG BN_mod_word(const BIGNUM *a, BN_ULONG w) { #ifndef BN_LLONG BN_ULONG ret=0; #else BN_ULLONG ret=0; #endif int i; w&=BN_MASK2; for (i=a->top-1; i>=0; i--) { #ifndef BN_LLONG ret=((ret<<BN_BITS4)|((a->d[i]>>BN_BITS4)&BN_MASK2l))%w; ret=((ret<<BN_BITS4)|(a->d[i]&BN_MASK2l))%w; #else ret=(BN_ULLONG)(((ret<<(BN_ULLONG)BN_BITS2)|a->d[i])% (BN_ULLONG)w); #endif } return((BN_ULONG)ret); } static int bnrand(int pseudorand, BIGNUM *rnd, int bits, int top, int bottom) { unsigned char *buf=NULL; int ret=0,bit,bytes,mask; if (bits == 0) { BN_zero(rnd); return 1; } bytes=(bits+7)/8; bit=(bits-1)%8; mask=0xff<<bit; buf=(unsigned char *)malloc(bytes); if (buf == NULL) { goto err; } /* make a random number and set the top and bottom bits */ /* this ignores the pseudorand flag */ t_random(buf, bytes); if (top) { if (bit == 0) { buf[0]=1; buf[1]|=0x80; } else { buf[0]|=(3<<(bit-1)); buf[0]&= ~(mask<<1); } } else { buf[0]|=(1<<bit); buf[0]&= ~(mask<<1); } if (bottom) /* set bottom bits to whatever odd is */ buf[bytes-1]|=1; if (!BN_bin2bn(buf,bytes,rnd)) goto err; ret=1; err: if (buf != NULL) { memset(buf,0,bytes); free(buf); } return(ret); } /* BN_pseudo_rand is the same as BN_rand, now. */ int BN_pseudo_rand(BIGNUM *rnd, int bits, int top, int bottom) { return bnrand(1, rnd, bits, top, bottom); } #define MONT_WORD /* use the faster word-based algorithm */ int BN_mod_mul_montgomery(BIGNUM *r, BIGNUM *a, BIGNUM *b, BN_MONT_CTX *mont, BN_CTX *ctx) { BIGNUM *tmp,*tmp2; int ret=0; BN_CTX_start(ctx); tmp = BN_CTX_get(ctx); tmp2 = BN_CTX_get(ctx); if (tmp == NULL || tmp2 == NULL) goto err; bn_check_top(tmp); bn_check_top(tmp2); if (a == b) { if (!BN_sqr(tmp,a,ctx)) goto err; } else { if (!BN_mul(tmp,a,b,ctx)) goto err; } /* reduce from aRR to aR */ if (!BN_from_montgomery(r,tmp,mont,ctx)) goto err; ret=1; err: BN_CTX_end(ctx); return(ret); } int BN_from_montgomery(BIGNUM *ret, BIGNUM *a, BN_MONT_CTX *mont, BN_CTX *ctx) { int retn=0; #ifdef MONT_WORD BIGNUM *n,*r; BN_ULONG *ap,*np,*rp,n0,v,*nrp; int al,nl,max,i,x,ri; BN_CTX_start(ctx); if ((r = BN_CTX_get(ctx)) == NULL) goto err; if (!BN_copy(r,a)) goto err; n= &(mont->N); ap=a->d; /* mont->ri is the size of mont->N in bits (rounded up to the word size) */ al=ri=mont->ri/BN_BITS2; nl=n->top; if ((al == 0) || (nl == 0)) { r->top=0; return(1); } max=(nl+al+1); /* allow for overflow (no?) XXX */ if (bn_wexpand(r,max) == NULL) goto err; if (bn_wexpand(ret,max) == NULL) goto err; r->neg=a->neg^n->neg; np=n->d; rp=r->d; nrp= &(r->d[nl]); /* clear the top words of T */ #if 1 for (i=r->top; i<max; i++) /* memset? 
XXX */ r->d[i]=0; #else memset(&(r->d[r->top]),0,(max-r->top)*sizeof(BN_ULONG)); #endif r->top=max; n0=mont->n0; #ifdef BN_COUNT printf("word BN_from_montgomery %d * %d\n",nl,nl); #endif for (i=0; i<nl; i++) { #ifdef __TANDEM { long long t1; long long t2; long long t3; t1 = rp[0] * (n0 & 0177777); t2 = 037777600000l; t2 = n0 & t2; t3 = rp[0] & 0177777; t2 = (t3 * t2) & BN_MASK2; t1 = t1 + t2; v=bn_mul_add_words(rp,np,nl,(BN_ULONG) t1); } #else v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2); #endif nrp++; rp++; if (((nrp[-1]+=v)&BN_MASK2) >= v) continue; else { if (((++nrp[0])&BN_MASK2) != 0) continue; if (((++nrp[1])&BN_MASK2) != 0) continue; for (x=2; (((++nrp[x])&BN_MASK2) == 0); x++) ; } } bn_fix_top(r); /* mont->ri will be a multiple of the word size */ #if 0 BN_rshift(ret,r,mont->ri); #else ret->neg = r->neg; x=ri; rp=ret->d; ap= &(r->d[x]); if (r->top < x) al=0; else al=r->top-x; ret->top=al; al-=4; for (i=0; i<al; i+=4) { BN_ULONG t1,t2,t3,t4; t1=ap[i+0]; t2=ap[i+1]; t3=ap[i+2]; t4=ap[i+3]; rp[i+0]=t1; rp[i+1]=t2; rp[i+2]=t3; rp[i+3]=t4; } al+=4; for (; i<al; i++) rp[i]=ap[i]; #endif #else /* !MONT_WORD */ BIGNUM *t1,*t2; BN_CTX_start(ctx); t1 = BN_CTX_get(ctx); t2 = BN_CTX_get(ctx); if (t1 == NULL || t2 == NULL) goto err; if (!BN_copy(t1,a)) goto err; BN_mask_bits(t1,mont->ri); if (!BN_mul(t2,t1,&mont->Ni,ctx)) goto err; BN_mask_bits(t2,mont->ri); if (!BN_mul(t1,t2,&mont->N,ctx)) goto err; if (!BN_add(t2,a,t1)) goto err; BN_rshift(ret,t2,mont->ri); #endif /* MONT_WORD */ if (BN_ucmp(ret, &(mont->N)) >= 0) { BN_usub(ret,ret,&(mont->N)); } retn=1; err: BN_CTX_end(ctx); return(retn); } void BN_MONT_CTX_init(BN_MONT_CTX *ctx) { ctx->ri=0; BN_init(&(ctx->RR)); BN_init(&(ctx->N)); BN_init(&(ctx->Ni)); ctx->flags=0; } BN_MONT_CTX *BN_MONT_CTX_new(void) { BN_MONT_CTX *ret; if ((ret=(BN_MONT_CTX *)malloc(sizeof(BN_MONT_CTX))) == NULL) return(NULL); BN_MONT_CTX_init(ret); ret->flags=BN_FLG_MALLOCED; return(ret); } void BN_MONT_CTX_free(BN_MONT_CTX *mont) { if(mont == NULL) return; BN_free(&(mont->RR)); BN_free(&(mont->N)); BN_free(&(mont->Ni)); if (mont->flags & BN_FLG_MALLOCED) free(mont); } int BN_MONT_CTX_set(BN_MONT_CTX *mont, const BIGNUM *mod, BN_CTX *ctx) { BIGNUM Ri,*R; BN_init(&Ri); R= &(mont->RR); /* grab RR as a temp */ BN_copy(&(mont->N),mod); /* Set N */ #ifdef MONT_WORD { BIGNUM tmod; BN_ULONG buf[2]; mont->ri=(BN_num_bits(mod)+(BN_BITS2-1))/BN_BITS2*BN_BITS2; BN_zero(R); BN_set_bit(R,BN_BITS2); /* R */ buf[0]=mod->d[0]; /* tmod = N mod word size */ buf[1]=0; tmod.d=buf; tmod.top=1; tmod.dmax=2; tmod.neg=mod->neg; /* Ri = R^-1 mod N*/ if ((BN_mod_inverse(&Ri,R,&tmod,ctx)) == NULL) goto err; BN_lshift(&Ri,&Ri,BN_BITS2); /* R*Ri */ if (!BN_is_zero(&Ri)) BN_sub_word(&Ri,1); else /* if N mod word size == 1 */ BN_set_word(&Ri,BN_MASK2); /* Ri-- (mod word size) */ BN_div(&Ri,NULL,&Ri,&tmod,ctx); /* Ni = (R*Ri-1)/N, * keep only least significant word: */ mont->n0=Ri.d[0]; BN_free(&Ri); } #else /* !MONT_WORD */ { /* bignum version */ mont->ri=BN_num_bits(mod); BN_zero(R); BN_set_bit(R,mont->ri); /* R = 2^ri */ /* Ri = R^-1 mod N*/ if ((BN_mod_inverse(&Ri,R,mod,ctx)) == NULL) goto err; BN_lshift(&Ri,&Ri,mont->ri); /* R*Ri */ BN_sub_word(&Ri,1); /* Ni = (R*Ri-1) / N */ BN_div(&(mont->Ni),NULL,&Ri,mod,ctx); BN_free(&Ri); } #endif /* setup RR for conversions */ BN_zero(&(mont->RR)); BN_set_bit(&(mont->RR),mont->ri*2); BN_mod(&(mont->RR),&(mont->RR),&(mont->N),ctx); return(1); err: return(0); } BIGNUM *BN_value_one(void) { static BN_ULONG data_one=1L; static BIGNUM 
const_one={&data_one,1,1,0}; return(&const_one); } /* solves ax == 1 (mod n) */ BIGNUM *BN_mod_inverse(BIGNUM *in, BIGNUM *a, const BIGNUM *n, BN_CTX *ctx) { BIGNUM *A,*B,*X,*Y,*M,*D,*R=NULL; BIGNUM *T,*ret=NULL; int sign; bn_check_top(a); bn_check_top(n); BN_CTX_start(ctx); A = BN_CTX_get(ctx); B = BN_CTX_get(ctx); X = BN_CTX_get(ctx); D = BN_CTX_get(ctx); M = BN_CTX_get(ctx); Y = BN_CTX_get(ctx); if (Y == NULL) goto err; if (in == NULL) R=BN_new(); else R=in; if (R == NULL) goto err; BN_zero(X); BN_one(Y); if (BN_copy(A,a) == NULL) goto err; if (BN_copy(B,n) == NULL) goto err; sign=1; while (!BN_is_zero(B)) { if (!BN_div(D,M,A,B,ctx)) goto err; T=A; A=B; B=M; /* T has a struct, M does not */ if (!BN_mul(T,D,X,ctx)) goto err; if (!BN_add(T,T,Y)) goto err; M=Y; Y=X; X=T; sign= -sign; } if (sign < 0) { if (!BN_sub(Y,n,Y)) goto err; } if (BN_is_one(A)) { if (!BN_mod(R,Y,n,ctx)) goto err; } else { goto err; } ret=R; err: if ((ret == NULL) && (in == NULL)) BN_free(R); BN_CTX_end(ctx); return(ret); } int BN_set_bit(BIGNUM *a, int n) { int i,j,k; i=n/BN_BITS2; j=n%BN_BITS2; if (a->top <= i) { if (bn_wexpand(a,i+1) == NULL) return(0); for(k=a->top; k<i+1; k++) a->d[k]=0; a->top=i+1; } a->d[i]|=(((BN_ULONG)1)<<j); return(1); }
gpl-2.0
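The sophie_germain() routine above steps candidate values of q through the residues 11, 23 and 29 (mod 30) using the sieve30[] table, then tests both q and p = 2q + 1 before accepting them. The toy program below replays that stepping logic on machine-word integers so the sieve behavior is easy to observe; the naive is_prime() is a stand-in for the trial-division, Fermat and probabilistic tests the real code applies to bignums, and the starting value 1000 is arbitrary.

#include <stdio.h>

/* x + sieve30[x % 30] lands on 11, 23 or 29 (mod 30), exactly as in
 * sophie_germain() above: any other residue makes q or p = 2q + 1
 * divisible by 2, 3 or 5. */
static const int sieve30[] = {
	11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 12,
	11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 6,
	5, 4, 3, 2, 1, 12
};

/* Naive trial division; good enough for small demonstration values. */
static int is_prime(unsigned long n)
{
	unsigned long d;

	if (n < 2)
		return 0;
	for (d = 2; d * d <= n; d++)
		if (n % d == 0)
			return 0;
	return 1;
}

int main(void)
{
	unsigned long q = 1000;		/* arbitrary starting point */

	q |= 1;				/* make q odd */
	q += sieve30[q % 30];		/* q is now 11, 23 or 29 (mod 30) */
	for (;; q += sieve30[q % 30])
		if (is_prime(q) && is_prime(2 * q + 1))
			break;
	printf("q = %lu, safe prime p = %lu\n", q, 2 * q + 1);
	return 0;
}

Starting from 1000 this prints q = 1013 and p = 2027, and every candidate it examined along the way was already guaranteed coprime to 2, 3 and 5 by the sieve.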
KylinUI/android_kernel_samsung_t1
arch/arm/plat-samsung/dev-i2c3.c
2056
1576
/* linux/arch/arm/plat-samsung/dev-i2c3.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5P series device definition for i2c device 3 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/platform_device.h> #include <mach/irqs.h> #include <mach/map.h> #include <plat/regs-iic.h> #include <plat/iic.h> #include <plat/devs.h> #include <plat/cpu.h> static struct resource s3c_i2c_resource[] = { [0] = { .start = S3C_PA_IIC3, .end = S3C_PA_IIC3 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IIC3, .end = IRQ_IIC3, .flags = IORESOURCE_IRQ, }, }; struct platform_device s3c_device_i2c3 = { .name = "s3c2440-i2c", .id = 3, .num_resources = ARRAY_SIZE(s3c_i2c_resource), .resource = s3c_i2c_resource, }; static struct s3c2410_platform_i2c default_i2c_data3 __initdata = { .flags = 0, .bus_num = 3, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, }; void __init s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *pd) { struct s3c2410_platform_i2c *npd; if (!pd) pd = &default_i2c_data3; npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL); if (!npd) printk(KERN_ERR "%s: no memory for platform data\n", __func__); else if (!npd->cfg_gpio) npd->cfg_gpio = s3c_i2c3_cfg_gpio; s3c_device_i2c3.dev.platform_data = npd; }
gpl-2.0
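s3c_i2c3_set_platdata() above shows a common platform-data idiom: substitute compiled-in defaults when the board passes NULL, then kmemdup() a private copy so the registered pointer never aliases caller storage (which may be __initdata and discarded). A user-space approximation follows; set_platdata(), struct i2c_platdata and the globals are invented stand-ins, and malloc plus memcpy plays the role of kmemdup.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct i2c_platdata {
	int bus_num;
	unsigned int frequency;
};

static const struct i2c_platdata default_pd = {
	.bus_num = 3,
	.frequency = 100 * 1000,
};

static struct i2c_platdata *registered;	/* dev.platform_data stand-in */

/* Same shape as s3c_i2c3_set_platdata(): fall back to defaults when
 * the board passes NULL, then keep a private copy of the data. */
static void set_platdata(const struct i2c_platdata *pd)
{
	struct i2c_platdata *npd;

	if (!pd)
		pd = &default_pd;
	npd = malloc(sizeof(*npd));
	if (!npd) {
		fprintf(stderr, "no memory for platform data\n");
		return;
	}
	memcpy(npd, pd, sizeof(*npd));
	registered = npd;
}

int main(void)
{
	set_platdata(NULL);	/* board code supplied nothing */
	printf("bus %d at %u Hz\n", registered->bus_num,
	       registered->frequency);
	free(registered);
	return 0;
}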
h8rift/android_kernel_htc_msm8960-evita
arch/arm/plat-samsung/dev-i2c3.c
2056
1576
/* linux/arch/arm/plat-samsung/dev-i2c3.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5P series device definition for i2c device 3 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/platform_device.h> #include <mach/irqs.h> #include <mach/map.h> #include <plat/regs-iic.h> #include <plat/iic.h> #include <plat/devs.h> #include <plat/cpu.h> static struct resource s3c_i2c_resource[] = { [0] = { .start = S3C_PA_IIC3, .end = S3C_PA_IIC3 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IIC3, .end = IRQ_IIC3, .flags = IORESOURCE_IRQ, }, }; struct platform_device s3c_device_i2c3 = { .name = "s3c2440-i2c", .id = 3, .num_resources = ARRAY_SIZE(s3c_i2c_resource), .resource = s3c_i2c_resource, }; static struct s3c2410_platform_i2c default_i2c_data3 __initdata = { .flags = 0, .bus_num = 3, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, }; void __init s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *pd) { struct s3c2410_platform_i2c *npd; if (!pd) pd = &default_i2c_data3; npd = kmemdup(pd, sizeof(struct s3c2410_platform_i2c), GFP_KERNEL); if (!npd) printk(KERN_ERR "%s: no memory for platform data\n", __func__); else if (!npd->cfg_gpio) npd->cfg_gpio = s3c_i2c3_cfg_gpio; s3c_device_i2c3.dev.platform_data = npd; }
gpl-2.0
Spartaner25/android_kernel_quanta_fg6q
net/ipv4/fib_trie.c
2056
63143
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet * & Swedish University of Agricultural Sciences. * * Jens Laas <jens.laas@data.slu.se> Swedish University of * Agricultural Sciences. * * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet * * This work is based on the LPC-trie which is originally described in: * * An experimental study of compression methods for dynamic tries * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. * http://www.csc.kth.se/~snilsson/software/dyntrie2/ * * * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 * * * Code from fib_hash has been reused which includes the following header: * * * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IPv4 FIB: lookup engine and maintenance routines. * * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Substantial contributions to this work comes from: * * David S. Miller, <davem@davemloft.net> * Stephen Hemminger <shemminger@osdl.org> * Paul E. McKenney <paulmck@us.ibm.com> * Patrick McHardy <kaber@trash.net> */ #define VERSION "0.409" #include <asm/uaccess.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/prefetch.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <net/route.h> #include <net/tcp.h> #include <net/sock.h> #include <net/ip_fib.h> #include "fib_lookup.h" #define MAX_STAT_DEPTH 32 #define KEYLENGTH (8*sizeof(t_key)) typedef unsigned int t_key; #define T_TNODE 0 #define T_LEAF 1 #define NODE_TYPE_MASK 0x1UL #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK) #define IS_TNODE(n) (!(n->parent & T_LEAF)) #define IS_LEAF(n) (n->parent & T_LEAF) struct rt_trie_node { unsigned long parent; t_key key; }; struct leaf { unsigned long parent; t_key key; struct hlist_head list; struct rcu_head rcu; }; struct leaf_info { struct hlist_node hlist; int plen; u32 mask_plen; /* ntohl(inet_make_mask(plen)) */ struct list_head falh; struct rcu_head rcu; }; struct tnode { unsigned long parent; t_key key; unsigned char pos; /* 2log(KEYLENGTH) bits needed */ unsigned char bits; /* 2log(KEYLENGTH) bits needed */ unsigned int full_children; /* KEYLENGTH bits needed */ unsigned int empty_children; /* KEYLENGTH bits needed */ union { struct rcu_head rcu; struct work_struct work; struct tnode *tnode_free; }; struct rt_trie_node 
__rcu *child[0]; }; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats { unsigned int gets; unsigned int backtrack; unsigned int semantic_match_passed; unsigned int semantic_match_miss; unsigned int null_node_hit; unsigned int resize_node_skipped; }; #endif struct trie_stat { unsigned int totdepth; unsigned int maxdepth; unsigned int tnodes; unsigned int leaves; unsigned int nullpointers; unsigned int prefixes; unsigned int nodesizes[MAX_STAT_DEPTH]; }; struct trie { struct rt_trie_node __rcu *trie; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats stats; #endif }; static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n); static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n, int wasfull); static struct rt_trie_node *resize(struct trie *t, struct tnode *tn); static struct tnode *inflate(struct trie *t, struct tnode *tn); static struct tnode *halve(struct trie *t, struct tnode *tn); /* tnodes to free after resize(); protected by RTNL */ static struct tnode *tnode_free_head; static size_t tnode_free_size; /* * synchronize_rcu after call_rcu for that many pages; it should be especially * useful before resizing the root node with PREEMPT_NONE configs; the value was * obtained experimentally, aiming to avoid visible slowdown. */ static const int sync_pages = 128; static struct kmem_cache *fn_alias_kmem __read_mostly; static struct kmem_cache *trie_leaf_kmem __read_mostly; /* * caller must hold RTNL */ static inline struct tnode *node_parent(const struct rt_trie_node *node) { unsigned long parent; parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held()); return (struct tnode *)(parent & ~NODE_TYPE_MASK); } /* * caller must hold RCU read lock or RTNL */ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node) { unsigned long parent; parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() || lockdep_rtnl_is_held()); return (struct tnode *)(parent & ~NODE_TYPE_MASK); } /* Same as rcu_assign_pointer * but that macro() assumes that value is a pointer. */ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr) { smp_wmb(); node->parent = (unsigned long)ptr | NODE_TYPE(node); } /* * caller must hold RTNL */ static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i) { BUG_ON(i >= 1U << tn->bits); return rtnl_dereference(tn->child[i]); } /* * caller must hold RCU read lock or RTNL */ static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i) { BUG_ON(i >= 1U << tn->bits); return rcu_dereference_rtnl(tn->child[i]); } static inline int tnode_child_length(const struct tnode *tn) { return 1 << tn->bits; } static inline t_key mask_pfx(t_key k, unsigned int l) { return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l); } static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits) { if (offset < KEYLENGTH) return ((t_key)(a << offset)) >> (KEYLENGTH - bits); else return 0; } static inline int tkey_equals(t_key a, t_key b) { return a == b; } static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b) { if (bits == 0 || offset >= KEYLENGTH) return 1; bits = bits > KEYLENGTH ? 
KEYLENGTH : bits; return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0; } static inline int tkey_mismatch(t_key a, int offset, t_key b) { t_key diff = a ^ b; int i = offset; if (!diff) return 0; while ((diff << i) >> (KEYLENGTH-1) == 0) i++; return i; } /* To understand this stuff, an understanding of keys and all their bits is necessary. Every node in the trie has a key associated with it, but not all of the bits in that key are significant. Consider a node 'n' and its parent 'tp'. If n is a leaf, every bit in its key is significant. Its presence is necessitated by path compression, since during a tree traversal (when searching for a leaf - unless we are doing an insertion) we will completely ignore all skipped bits we encounter. Thus we need to verify, at the end of a potentially successful search, that we have indeed been walking the correct key path. Note that we can never "miss" the correct key in the tree if present by following the wrong path. Path compression ensures that segments of the key that are the same for all keys with a given prefix are skipped, but the skipped part *is* identical for each node in the subtrie below the skipped bit! trie_insert() in this implementation takes care of that - note the call to tkey_sub_equals() in trie_insert(). if n is an internal node - a 'tnode' here, the various parts of its key have many different meanings. Example: _________________________________________________________________ | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C | ----------------------------------------------------------------- 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 _________________________________________________________________ | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u | ----------------------------------------------------------------- 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 tp->pos = 7 tp->bits = 3 n->pos = 15 n->bits = 4 First, let's just ignore the bits that come before the parent tp, that is the bits from 0 to (tp->pos-1). They are *known* but at this point we do not use them for anything. The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the index into the parent's child array. That is, they will be used to find 'n' among tp's children. The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits for the node n. All the bits we have seen so far are significant to the node n. The rest of the bits are really not needed or indeed known in n->key. The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into n's child array, and will of course be different for each child. The rest of the bits, from (n->pos + n->bits) onward, are completely unknown at this point. 
*/ static inline void check_tnode(const struct tnode *tn) { WARN_ON(tn && tn->pos+tn->bits > 32); } static const int halve_threshold = 25; static const int inflate_threshold = 50; static const int halve_threshold_root = 15; static const int inflate_threshold_root = 30; static void __alias_free_mem(struct rcu_head *head) { struct fib_alias *fa = container_of(head, struct fib_alias, rcu); kmem_cache_free(fn_alias_kmem, fa); } static inline void alias_free_mem_rcu(struct fib_alias *fa) { call_rcu(&fa->rcu, __alias_free_mem); } static void __leaf_free_rcu(struct rcu_head *head) { struct leaf *l = container_of(head, struct leaf, rcu); kmem_cache_free(trie_leaf_kmem, l); } static inline void free_leaf(struct leaf *l) { call_rcu_bh(&l->rcu, __leaf_free_rcu); } static inline void free_leaf_info(struct leaf_info *leaf) { kfree_rcu(leaf, rcu); } static struct tnode *tnode_alloc(size_t size) { if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); else return vzalloc(size); } static void __tnode_vfree(struct work_struct *arg) { struct tnode *tn = container_of(arg, struct tnode, work); vfree(tn); } static void __tnode_free_rcu(struct rcu_head *head) { struct tnode *tn = container_of(head, struct tnode, rcu); size_t size = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << tn->bits); if (size <= PAGE_SIZE) kfree(tn); else { INIT_WORK(&tn->work, __tnode_vfree); schedule_work(&tn->work); } } static inline void tnode_free(struct tnode *tn) { if (IS_LEAF(tn)) free_leaf((struct leaf *) tn); else call_rcu(&tn->rcu, __tnode_free_rcu); } static void tnode_free_safe(struct tnode *tn) { BUG_ON(IS_LEAF(tn)); tn->tnode_free = tnode_free_head; tnode_free_head = tn; tnode_free_size += sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << tn->bits); } static void tnode_free_flush(void) { struct tnode *tn; while ((tn = tnode_free_head)) { tnode_free_head = tn->tnode_free; tn->tnode_free = NULL; tnode_free(tn); } if (tnode_free_size >= PAGE_SIZE * sync_pages) { tnode_free_size = 0; synchronize_rcu(); } } static struct leaf *leaf_new(void) { struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); if (l) { l->parent = T_LEAF; INIT_HLIST_HEAD(&l->list); } return l; } static struct leaf_info *leaf_info_new(int plen) { struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL); if (li) { li->plen = plen; li->mask_plen = ntohl(inet_make_mask(plen)); INIT_LIST_HEAD(&li->falh); } return li; } static struct tnode *tnode_new(t_key key, int pos, int bits) { size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits); struct tnode *tn = tnode_alloc(sz); if (tn) { tn->parent = T_TNODE; tn->pos = pos; tn->bits = bits; tn->key = key; tn->full_children = 0; tn->empty_children = 1<<bits; } pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode), sizeof(struct rt_trie_node) << bits); return tn; } /* * Check whether a tnode 'n' is "full", i.e. it is an internal node * and no bits are skipped. See discussion in dyntree paper p. 6 */ static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n) { if (n == NULL || IS_LEAF(n)) return 0; return ((struct tnode *) n)->pos == tn->pos + tn->bits; } static inline void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n) { tnode_put_child_reorg(tn, i, n, -1); } /* * Add a child at position i overwriting the old value. * Update the value of full_children and empty_children. 
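 * ("Full" means a child that is itself a tnode with no skipped bits - see tnode_full() above; these two counters are what drive the inflate() and halve() decisions in resize().)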
*/ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n, int wasfull) { struct rt_trie_node *chi = rtnl_dereference(tn->child[i]); int isfull; BUG_ON(i >= 1<<tn->bits); /* update emptyChildren */ if (n == NULL && chi != NULL) tn->empty_children++; else if (n != NULL && chi == NULL) tn->empty_children--; /* update fullChildren */ if (wasfull == -1) wasfull = tnode_full(tn, chi); isfull = tnode_full(tn, n); if (wasfull && !isfull) tn->full_children--; else if (!wasfull && isfull) tn->full_children++; if (n) node_set_parent(n, tn); rcu_assign_pointer(tn->child[i], n); } #define MAX_WORK 10 static struct rt_trie_node *resize(struct trie *t, struct tnode *tn) { int i; struct tnode *old_tn; int inflate_threshold_use; int halve_threshold_use; int max_work; if (!tn) return NULL; pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n", tn, inflate_threshold, halve_threshold); /* No children */ if (tn->empty_children == tnode_child_length(tn)) { tnode_free_safe(tn); return NULL; } /* One child */ if (tn->empty_children == tnode_child_length(tn) - 1) goto one_child; /* * Double as long as the resulting node has a number of * nonempty nodes that are above the threshold. */ /* * From "Implementing a dynamic compressed trie" by Stefan Nilsson of * the Helsinki University of Technology and Matti Tikkanen of Nokia * Telecommunications, page 6: * "A node is doubled if the ratio of non-empty children to all * children in the *doubled* node is at least 'high'." * * 'high' in this instance is the variable 'inflate_threshold'. It * is expressed as a percentage, so we multiply it with * tnode_child_length() and instead of multiplying by 2 (since the * child array will be doubled by inflate()) and multiplying * the left-hand side by 100 (to handle the percentage thing) we * multiply the left-hand side by 50. * * The left-hand side may look a bit weird: tnode_child_length(tn) * - tn->empty_children is of course the number of non-null children * in the current node. tn->full_children is the number of "full" * children, that is non-null tnodes with a skip value of 0. * All of those will be doubled in the resulting inflated tnode, so * we just count them one extra time here. * * A clearer way to write this would be: * * to_be_doubled = tn->full_children; * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children - * tn->full_children; * * new_child_length = tnode_child_length(tn) * 2; * * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) / * new_child_length; * if (new_fill_factor >= inflate_threshold) * * ...and so on, tho it would mess up the while () loop. 
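 * * As a numeric sketch (values invented for illustration, not measured): with tnode_child_length(tn) = 16, tn->empty_children = 4 and tn->full_children = 2, we get not_to_be_doubled = 10, to_be_doubled = 2, new_child_length = 32 and new_fill_factor = 100 * (10 + 4) / 32 = 43, which is below the default inflate_threshold of 50, so such a node would not be doubled.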
* * anyway, * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >= * inflate_threshold * * avoid a division: * 100 * (not_to_be_doubled + 2*to_be_doubled) >= * inflate_threshold * new_child_length * * expand not_to_be_doubled and to_be_doubled, and shorten: * 100 * (tnode_child_length(tn) - tn->empty_children + * tn->full_children) >= inflate_threshold * new_child_length * * expand new_child_length: * 100 * (tnode_child_length(tn) - tn->empty_children + * tn->full_children) >= * inflate_threshold * tnode_child_length(tn) * 2 * * shorten again: * 50 * (tn->full_children + tnode_child_length(tn) - * tn->empty_children) >= inflate_threshold * * tnode_child_length(tn) * */ check_tnode(tn); /* Keep root node larger */ if (!node_parent((struct rt_trie_node *)tn)) { inflate_threshold_use = inflate_threshold_root; halve_threshold_use = halve_threshold_root; } else { inflate_threshold_use = inflate_threshold; halve_threshold_use = halve_threshold; } max_work = MAX_WORK; while ((tn->full_children > 0 && max_work-- && 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= inflate_threshold_use * tnode_child_length(tn))) { old_tn = tn; tn = inflate(t, tn); if (IS_ERR(tn)) { tn = old_tn; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.resize_node_skipped++; #endif break; } } check_tnode(tn); /* Return if at least one inflate is run */ if (max_work != MAX_WORK) return (struct rt_trie_node *) tn; /* * Halve as long as the number of empty children in this * node is above threshold. */ max_work = MAX_WORK; while (tn->bits > 1 && max_work-- && 100 * (tnode_child_length(tn) - tn->empty_children) < halve_threshold_use * tnode_child_length(tn)) { old_tn = tn; tn = halve(t, tn); if (IS_ERR(tn)) { tn = old_tn; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.resize_node_skipped++; #endif break; } } /* Only one child remains */ if (tn->empty_children == tnode_child_length(tn) - 1) { one_child: for (i = 0; i < tnode_child_length(tn); i++) { struct rt_trie_node *n; n = rtnl_dereference(tn->child[i]); if (!n) continue; /* compress one level */ node_set_parent(n, NULL); tnode_free_safe(tn); return n; } } return (struct rt_trie_node *) tn; } static void tnode_clean_free(struct tnode *tn) { int i; struct tnode *tofree; for (i = 0; i < tnode_child_length(tn); i++) { tofree = (struct tnode *)rtnl_dereference(tn->child[i]); if (tofree) tnode_free(tofree); } tnode_free(tn); } static struct tnode *inflate(struct trie *t, struct tnode *tn) { struct tnode *oldtnode = tn; int olen = tnode_child_length(tn); int i; pr_debug("In inflate\n"); tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1); if (!tn) return ERR_PTR(-ENOMEM); /* * Preallocate and store tnodes before the actual work so we * don't get into an inconsistent state if memory allocation * fails. In case of failure we return the oldnode and inflate * of tnode is ignored. 
*/ for (i = 0; i < olen; i++) { struct tnode *inode; inode = (struct tnode *) tnode_get_child(oldtnode, i); if (inode && IS_TNODE(inode) && inode->pos == oldtnode->pos + oldtnode->bits && inode->bits > 1) { struct tnode *left, *right; t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos; left = tnode_new(inode->key&(~m), inode->pos + 1, inode->bits - 1); if (!left) goto nomem; right = tnode_new(inode->key|m, inode->pos + 1, inode->bits - 1); if (!right) { tnode_free(left); goto nomem; } put_child(t, tn, 2*i, (struct rt_trie_node *) left); put_child(t, tn, 2*i+1, (struct rt_trie_node *) right); } } for (i = 0; i < olen; i++) { struct tnode *inode; struct rt_trie_node *node = tnode_get_child(oldtnode, i); struct tnode *left, *right; int size, j; /* An empty child */ if (node == NULL) continue; /* A leaf or an internal node with skipped bits */ if (IS_LEAF(node) || ((struct tnode *) node)->pos > tn->pos + tn->bits - 1) { if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits, 1) == 0) put_child(t, tn, 2*i, node); else put_child(t, tn, 2*i+1, node); continue; } /* An internal node with two children */ inode = (struct tnode *) node; if (inode->bits == 1) { put_child(t, tn, 2*i, rtnl_dereference(inode->child[0])); put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1])); tnode_free_safe(inode); continue; } /* An internal node with more than two children */ /* We will replace this node 'inode' with two new * ones, 'left' and 'right', each with half of the * original children. The two new nodes will have * a position one bit further down the key and this * means that the "significant" part of their keys * (see the discussion near the top of this file) * will differ by one bit, which will be "0" in * left's key and "1" in right's key. Since we are * moving the key position by one step, the bit that * we are moving away from - the bit at position * (inode->pos) - is the one that will differ between * left and right. So... we synthesize that bit in the * two new keys. * The mask 'm' below will be a single "one" bit at * the position (inode->pos) */ /* Use the old key, but set the new significant * bit to zero. */ left = (struct tnode *) tnode_get_child(tn, 2*i); put_child(t, tn, 2*i, NULL); BUG_ON(!left); right = (struct tnode *) tnode_get_child(tn, 2*i+1); put_child(t, tn, 2*i+1, NULL); BUG_ON(!right); size = tnode_child_length(left); for (j = 0; j < size; j++) { put_child(t, left, j, rtnl_dereference(inode->child[j])); put_child(t, right, j, rtnl_dereference(inode->child[j + size])); } put_child(t, tn, 2*i, resize(t, left)); put_child(t, tn, 2*i+1, resize(t, right)); tnode_free_safe(inode); } tnode_free_safe(oldtnode); return tn; nomem: tnode_clean_free(tn); return ERR_PTR(-ENOMEM); } static struct tnode *halve(struct trie *t, struct tnode *tn) { struct tnode *oldtnode = tn; struct rt_trie_node *left, *right; int i; int olen = tnode_child_length(tn); pr_debug("In halve\n"); tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1); if (!tn) return ERR_PTR(-ENOMEM); /* * Preallocate and store tnodes before the actual work so we * don't get into an inconsistent state if memory allocation * fails. In case of failure we return the oldnode and halve * of tnode is ignored. 
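 * * (Shape sketch, illustrative only: a tnode with bits = 3, i.e. eight slots, halves into one with bits = 2; old children at indices 2i and 2i+1 collapse into new index i, hung under a fresh one-bit tnode when both are nonempty.)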
 */ for (i = 0; i < olen; i += 2) { left = tnode_get_child(oldtnode, i); right = tnode_get_child(oldtnode, i+1); /* Two nonempty children */ if (left && right) { struct tnode *newn; newn = tnode_new(left->key, tn->pos + tn->bits, 1); if (!newn) goto nomem; put_child(t, tn, i/2, (struct rt_trie_node *)newn); } } for (i = 0; i < olen; i += 2) { struct tnode *newBinNode; left = tnode_get_child(oldtnode, i); right = tnode_get_child(oldtnode, i+1); /* At least one of the children is empty */ if (left == NULL) { if (right == NULL) /* Both are empty */ continue; put_child(t, tn, i/2, right); continue; } if (right == NULL) { put_child(t, tn, i/2, left); continue; } /* Two nonempty children */ newBinNode = (struct tnode *) tnode_get_child(tn, i/2); put_child(t, tn, i/2, NULL); put_child(t, newBinNode, 0, left); put_child(t, newBinNode, 1, right); put_child(t, tn, i/2, resize(t, newBinNode)); } tnode_free_safe(oldtnode); return tn; nomem: tnode_clean_free(tn); return ERR_PTR(-ENOMEM); } /* The read side must use rcu_read_lock; currently the only readers are the dump routines, via get_fa_head() and dump */ static struct leaf_info *find_leaf_info(struct leaf *l, int plen) { struct hlist_head *head = &l->list; struct hlist_node *node; struct leaf_info *li; hlist_for_each_entry_rcu(li, node, head, hlist) if (li->plen == plen) return li; return NULL; } static inline struct list_head *get_fa_head(struct leaf *l, int plen) { struct leaf_info *li = find_leaf_info(l, plen); if (!li) return NULL; return &li->falh; } static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) { struct leaf_info *li = NULL, *last = NULL; struct hlist_node *node; if (hlist_empty(head)) { hlist_add_head_rcu(&new->hlist, head); } else { hlist_for_each_entry(li, node, head, hlist) { if (new->plen > li->plen) break; last = li; } if (last) hlist_add_after_rcu(&last->hlist, &new->hlist); else hlist_add_before_rcu(&new->hlist, &li->hlist); } } /* rcu_read_lock needs to be held by the caller on the read side */ static struct leaf * fib_find_node(struct trie *t, u32 key) { int pos; struct tnode *tn; struct rt_trie_node *n; pos = 0; n = rcu_dereference_rtnl(t->trie); while (n != NULL && NODE_TYPE(n) == T_TNODE) { tn = (struct tnode *) n; check_tnode(tn); if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) { pos = tn->pos + tn->bits; n = tnode_get_child_rcu(tn, tkey_extract_bits(key, tn->pos, tn->bits)); } else break; } /* Case we have found a leaf.
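(The descent above compared only the non-skipped bits, so - per the path-compression discussion near the top of this file - the full key must still be verified with tkey_equals() before the leaf is trusted.)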
Compare prefixes */ if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) return (struct leaf *)n; return NULL; } static void trie_rebalance(struct trie *t, struct tnode *tn) { int wasfull; t_key cindex, key; struct tnode *tp; key = tn->key; while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) { cindex = tkey_extract_bits(key, tp->pos, tp->bits); wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); tn = (struct tnode *) resize(t, (struct tnode *)tn); tnode_put_child_reorg((struct tnode *)tp, cindex, (struct rt_trie_node *)tn, wasfull); tp = node_parent((struct rt_trie_node *) tn); if (!tp) rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); tnode_free_flush(); if (!tp) break; tn = tp; } /* Handle last (top) tnode */ if (IS_TNODE(tn)) tn = (struct tnode *)resize(t, (struct tnode *)tn); rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); tnode_free_flush(); } /* only used from updater-side */ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) { int pos, newpos; struct tnode *tp = NULL, *tn = NULL; struct rt_trie_node *n; struct leaf *l; int missbit; struct list_head *fa_head = NULL; struct leaf_info *li; t_key cindex; pos = 0; n = rtnl_dereference(t->trie); /* If we point to NULL, stop. Either the tree is empty and we should * just put a new leaf in it, or we have reached an empty child slot, * and we should just put our new leaf in that. * If we point to a T_TNODE, check if it matches our key. Note that * a T_TNODE might be skipping any number of bits - its 'pos' need * not be the parent's 'pos'+'bits'! * * If it does match the current key, get pos/bits from it, extract * the index from our key, push the T_TNODE and walk the tree. * * If it doesn't, we have to replace it with a new T_TNODE. * * If we point to a T_LEAF, it might or might not have the same key * as we do. If it does, just change the value, update the T_LEAF's * value, and return it. * If it doesn't, we need to replace it with a T_TNODE. */ while (n != NULL && NODE_TYPE(n) == T_TNODE) { tn = (struct tnode *) n; check_tnode(tn); if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) { tp = tn; pos = tn->pos + tn->bits; n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); BUG_ON(n && node_parent(n) != tn); } else break; } /* * n ----> NULL, LEAF or TNODE * * tp is n's (parent) ----> NULL or TNODE */ BUG_ON(tp && IS_LEAF(tp)); /* Case 1: n is a leaf. Compare prefixes */ if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) { l = (struct leaf *) n; li = leaf_info_new(plen); if (!li) return NULL; fa_head = &li->falh; insert_leaf_info(&l->list, li); goto done; } l = leaf_new(); if (!l) return NULL; l->key = key; li = leaf_info_new(plen); if (!li) { free_leaf(l); return NULL; } fa_head = &li->falh; insert_leaf_info(&l->list, li); if (t->trie && n == NULL) { /* Case 2: n is NULL, and will just insert a new leaf */ node_set_parent((struct rt_trie_node *)l, tp); cindex = tkey_extract_bits(key, tp->pos, tp->bits); put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l); } else { /* Case 3: n is a LEAF or a TNODE and the key doesn't match.
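(A one-bit tnode is spliced in at the first bit where the keys diverge, found via tkey_mismatch() below; the new leaf and the old subtree become its two children.)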
 */ /* * Add a new tnode here * the first tnode needs some special handling */ if (tp) pos = tp->pos+tp->bits; else pos = 0; if (n) { newpos = tkey_mismatch(key, pos, n->key); tn = tnode_new(n->key, newpos, 1); } else { newpos = 0; tn = tnode_new(key, newpos, 1); /* First tnode */ } if (!tn) { free_leaf_info(li); free_leaf(l); return NULL; } node_set_parent((struct rt_trie_node *)tn, tp); missbit = tkey_extract_bits(key, newpos, 1); put_child(t, tn, missbit, (struct rt_trie_node *)l); put_child(t, tn, 1-missbit, n); if (tp) { cindex = tkey_extract_bits(key, tp->pos, tp->bits); put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)tn); } else { rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn); tp = tn; } } if (tp && tp->pos + tp->bits > 32) pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", tp, tp->pos, tp->bits, key, plen); /* Rebalance the trie */ trie_rebalance(t, tp); done: return fa_head; } /* * Caller must hold RTNL. */ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) { struct trie *t = (struct trie *) tb->tb_data; struct fib_alias *fa, *new_fa; struct list_head *fa_head = NULL; struct fib_info *fi; int plen = cfg->fc_dst_len; u8 tos = cfg->fc_tos; u32 key, mask; int err; struct leaf *l; if (plen > 32) return -EINVAL; key = ntohl(cfg->fc_dst); pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); mask = ntohl(inet_make_mask(plen)); if (key & ~mask) return -EINVAL; key = key & mask; fi = fib_create_info(cfg); if (IS_ERR(fi)) { err = PTR_ERR(fi); goto err; } l = fib_find_node(t, key); fa = NULL; if (l) { fa_head = get_fa_head(l, plen); fa = fib_find_alias(fa_head, tos, fi->fib_priority); } /* Now fa, if non-NULL, points to the first fib alias * with the same keys [prefix,tos,priority], if such a key already * exists, or to the node before which we will insert the new one. * * If fa is NULL, we will need to allocate a new one and * insert it at the head of f. * * If f is NULL, no fib node matched the destination key * and we need to allocate a new one of those as well. */ if (fa && fa->fa_tos == tos && fa->fa_info->fib_priority == fi->fib_priority) { struct fib_alias *fa_first, *fa_match; err = -EEXIST; if (cfg->fc_nlflags & NLM_F_EXCL) goto out; /* We have 2 goals: * 1. Find exact match for type, scope, fib_info to avoid * duplicate routes * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it */ fa_match = NULL; fa_first = fa; fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list); list_for_each_entry_continue(fa, fa_head, fa_list) { if (fa->fa_tos != tos) break; if (fa->fa_info->fib_priority != fi->fib_priority) break; if (fa->fa_type == cfg->fc_type && fa->fa_info == fi) { fa_match = fa; break; } } if (cfg->fc_nlflags & NLM_F_REPLACE) { struct fib_info *fi_drop; u8 state; fa = fa_first; if (fa_match) { if (fa == fa_match) err = 0; goto out; } err = -ENOBUFS; new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (new_fa == NULL) goto out; fi_drop = fa->fa_info; new_fa->fa_tos = fa->fa_tos; new_fa->fa_info = fi; new_fa->fa_type = cfg->fc_type; state = fa->fa_state; new_fa->fa_state = state & ~FA_S_ACCESSED; list_replace_rcu(&fa->fa_list, &new_fa->fa_list); alias_free_mem_rcu(fa); fib_release_info(fi_drop); if (state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); goto succeeded; } /* Error if we find a perfect match which * uses the same scope, type, and nexthop * information.
*/ if (fa_match) goto out; if (!(cfg->fc_nlflags & NLM_F_APPEND)) fa = fa_first; } err = -ENOENT; if (!(cfg->fc_nlflags & NLM_F_CREATE)) goto out; err = -ENOBUFS; new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (new_fa == NULL) goto out; new_fa->fa_info = fi; new_fa->fa_tos = tos; new_fa->fa_type = cfg->fc_type; new_fa->fa_state = 0; /* * Insert new entry to the list. */ if (!fa_head) { fa_head = fib_insert_node(t, key, plen); if (unlikely(!fa_head)) { err = -ENOMEM; goto out_free_new_fa; } } if (!plen) tb->tb_num_default++; list_add_tail_rcu(&new_fa->fa_list, (fa ? &fa->fa_list : fa_head)); rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, 0); succeeded: return 0; out_free_new_fa: kmem_cache_free(fn_alias_kmem, new_fa); out: fib_release_info(fi); err: return err; } /* should be called with rcu_read_lock */ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l, t_key key, const struct flowi4 *flp, struct fib_result *res, int fib_flags) { struct leaf_info *li; struct hlist_head *hhead = &l->list; struct hlist_node *node; hlist_for_each_entry_rcu(li, node, hhead, hlist) { struct fib_alias *fa; if (l->key != (key & li->mask_plen)) continue; list_for_each_entry_rcu(fa, &li->falh, fa_list) { struct fib_info *fi = fa->fa_info; int nhsel, err; if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) continue; if (fi->fib_dead) continue; if (fa->fa_info->fib_scope < flp->flowi4_scope) continue; fib_alias_accessed(fa); err = fib_props[fa->fa_type].error; if (err) { #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.semantic_match_passed++; #endif return err; } if (fi->fib_flags & RTNH_F_DEAD) continue; for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) { const struct fib_nh *nh = &fi->fib_nh[nhsel]; if (nh->nh_flags & RTNH_F_DEAD) continue; if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif) continue; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.semantic_match_passed++; #endif res->prefixlen = li->plen; res->nh_sel = nhsel; res->type = fa->fa_type; res->scope = fa->fa_info->fib_scope; res->fi = fi; res->table = tb; res->fa_head = &li->falh; if (!(fib_flags & FIB_LOOKUP_NOREF)) atomic_inc(&fi->fib_clntref); return 0; } } #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.semantic_match_miss++; #endif } return 1; } int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags) { struct trie *t = (struct trie *) tb->tb_data; int ret; struct rt_trie_node *n; struct tnode *pn; unsigned int pos, bits; t_key key = ntohl(flp->daddr); unsigned int chopped_off; t_key cindex = 0; unsigned int current_prefix_length = KEYLENGTH; struct tnode *cn; t_key pref_mismatch; rcu_read_lock(); n = rcu_dereference(t->trie); if (!n) goto failed; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.gets++; #endif /* Just a leaf? */ if (IS_LEAF(n)) { ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags); goto found; } pn = (struct tnode *) n; chopped_off = 0; while (pn) { pos = pn->pos; bits = pn->bits; if (!chopped_off) cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length), pos, bits); n = tnode_get_child_rcu(pn, cindex); if (n == NULL) { #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.null_node_hit++; #endif goto backtrace; } if (IS_LEAF(n)) { ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags); if (ret > 0) goto backtrace; goto found; } cn = (struct tnode *)n; /* * It's a tnode, and we can do some extra checks here if we * like, to avoid descending into a dead-end branch. 
 * This tnode is in the parent's child array at index * key[p_pos..p_pos+p_bits] but potentially with some bits * chopped off, so in reality the index may be just a * subprefix, padded with zero at the end. * We can also take a look at any skipped bits in this * tnode - everything up to p_pos is supposed to be ok, * and the non-chopped bits of the index (see previous * paragraph) are also guaranteed ok, but the rest is * considered unknown. * * The skipped bits are key[pos+bits..cn->pos]. */ /* If current_prefix_length < pos+bits, we are already doing * actual prefix matching, which means everything from * pos+(bits-chopped_off) onward must be zero along some * branch of this subtree - otherwise there is *no* valid * prefix present. Here we can only check the skipped * bits. Remember, since we have already indexed into the * parent's child array, we know that the bits we chopped off * *are* zero. */ /* NOTA BENE: Checking only skipped bits for the new node here */ if (current_prefix_length < pos+bits) { if (tkey_extract_bits(cn->key, current_prefix_length, cn->pos - current_prefix_length) || !(cn->child[0])) goto backtrace; } /* * If chopped_off=0, the index is fully validated and we * only need to look at the skipped bits for this, the new, * tnode. What we actually want to do is to find out if * these skipped bits match our key perfectly, or if we will * have to count on finding a matching prefix further down, * because if we do, we would like to have some way of * verifying the existence of such a prefix at this point. */ /* The only thing we can do at this point is to verify that * any such matching prefix can indeed be a prefix to our * key, and if the bits in the node we are inspecting that * do not match our key are not ZERO, this cannot be true. * Thus, find out where there is a mismatch (before cn->pos) * and verify that all the mismatching bits are zero in the * new tnode's key. */ /* * Note: We aren't very concerned about the piece of * the key that precedes pn->pos+pn->bits, since these * have already been checked. The bits after cn->pos * aren't checked since these are by definition * "unknown" at this point. Thus, what we want to see * is if we are about to enter the "prefix matching" * state, and in that case verify that the skipped * bits that will prevail throughout this subtree are * zero, as they have to be if we are to find a * matching prefix. */ pref_mismatch = mask_pfx(cn->key ^ key, cn->pos); /* * In short: If skipped bits in this node do not match * the search key, enter the "prefix matching" * state directly. */ if (pref_mismatch) { int mp = KEYLENGTH - fls(pref_mismatch); if (tkey_extract_bits(cn->key, mp, cn->pos - mp) != 0) goto backtrace; if (current_prefix_length >= cn->pos) current_prefix_length = mp; } pn = (struct tnode *)n; /* Descend */ chopped_off = 0; continue; backtrace: chopped_off++; /* Zero bits don't change the child key (cindex), so skip them */ while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1)))) chopped_off++; /* Decrease current_... with bits chopped off */ if (current_prefix_length > pn->pos + pn->bits - chopped_off) current_prefix_length = pn->pos + pn->bits - chopped_off; /* * Either we do the actual chop off accordingly, or, if we have * chopped off all bits in this tnode, we walk up to our parent.
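 * (Illustration with invented numbers: if pn->bits = 3 and cindex = 6, binary 110, the first chop finds bit 0 already zero and advances chopped_off past it to bit 1; clearing that bit below yields cindex = 4, binary 100, the next candidate subtree.)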
 */ if (chopped_off <= pn->bits) { cindex &= ~(1 << (chopped_off-1)); } else { struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn); if (!parent) goto failed; /* Get the child's index */ cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits); pn = parent; chopped_off = 0; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats.backtrack++; #endif goto backtrace; } } failed: ret = 1; found: rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(fib_table_lookup); /* * Remove the leaf and return parent. */ static void trie_leaf_remove(struct trie *t, struct leaf *l) { struct tnode *tp = node_parent((struct rt_trie_node *) l); pr_debug("entering trie_leaf_remove(%p)\n", l); if (tp) { t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits); put_child(t, (struct tnode *)tp, cindex, NULL); trie_rebalance(t, tp); } else RCU_INIT_POINTER(t->trie, NULL); free_leaf(l); } /* * Caller must hold RTNL. */ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) { struct trie *t = (struct trie *) tb->tb_data; u32 key, mask; int plen = cfg->fc_dst_len; u8 tos = cfg->fc_tos; struct fib_alias *fa, *fa_to_delete; struct list_head *fa_head; struct leaf *l; struct leaf_info *li; if (plen > 32) return -EINVAL; key = ntohl(cfg->fc_dst); mask = ntohl(inet_make_mask(plen)); if (key & ~mask) return -EINVAL; key = key & mask; l = fib_find_node(t, key); if (!l) return -ESRCH; fa_head = get_fa_head(l, plen); fa = fib_find_alias(fa_head, tos, 0); if (!fa) return -ESRCH; pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t); fa_to_delete = NULL; fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list); list_for_each_entry_continue(fa, fa_head, fa_list) { struct fib_info *fi = fa->fa_info; if (fa->fa_tos != tos) break; if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && (cfg->fc_scope == RT_SCOPE_NOWHERE || fa->fa_info->fib_scope == cfg->fc_scope) && (!cfg->fc_prefsrc || fi->fib_prefsrc == cfg->fc_prefsrc) && (!cfg->fc_protocol || fi->fib_protocol == cfg->fc_protocol) && fib_nh_match(cfg, fi) == 0) { fa_to_delete = fa; break; } } if (!fa_to_delete) return -ESRCH; fa = fa_to_delete; rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, &cfg->fc_nlinfo, 0); l = fib_find_node(t, key); li = find_leaf_info(l, plen); list_del_rcu(&fa->fa_list); if (!plen) tb->tb_num_default--; if (list_empty(fa_head)) { hlist_del_rcu(&li->hlist); free_leaf_info(li); } if (hlist_empty(&l->list)) trie_leaf_remove(t, l); if (fa->fa_state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); fib_release_info(fa->fa_info); alias_free_mem_rcu(fa); return 0; } static int trie_flush_list(struct list_head *head) { struct fib_alias *fa, *fa_node; int found = 0; list_for_each_entry_safe(fa, fa_node, head, fa_list) { struct fib_info *fi = fa->fa_info; if (fi && (fi->fib_flags & RTNH_F_DEAD)) { list_del_rcu(&fa->fa_list); fib_release_info(fa->fa_info); alias_free_mem_rcu(fa); found++; } } return found; } static int trie_flush_leaf(struct leaf *l) { int found = 0; struct hlist_head *lih = &l->list; struct hlist_node *node, *tmp; struct leaf_info *li = NULL; hlist_for_each_entry_safe(li, node, tmp, lih, hlist) { found += trie_flush_list(&li->falh); if (list_empty(&li->falh)) { hlist_del_rcu(&li->hlist); free_leaf_info(li); } } return found; } /* * Scan for the next right leaf starting at node p->child[idx] * Since we have a back pointer, no recursion is necessary.
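 * (The order is depth-first, left to right: resume at the slot after c, descend into the first nonempty child, and pop back to the parent once a node's slots are exhausted.)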
 */ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) { do { t_key idx; if (c) idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1; else idx = 0; while (idx < 1u << p->bits) { c = tnode_get_child_rcu(p, idx++); if (!c) continue; if (IS_LEAF(c)) { prefetch(rcu_dereference_rtnl(p->child[idx])); return (struct leaf *) c; } /* Restart: begin scanning in the new node */ p = (struct tnode *) c; idx = 0; } /* Node empty, walk back up to parent */ c = (struct rt_trie_node *) p; } while ((p = node_parent_rcu(c)) != NULL); return NULL; /* Root of trie */ } static struct leaf *trie_firstleaf(struct trie *t) { struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie); if (!n) return NULL; if (IS_LEAF(n)) /* trie is just a leaf */ return (struct leaf *) n; return leaf_walk_rcu(n, NULL); } static struct leaf *trie_nextleaf(struct leaf *l) { struct rt_trie_node *c = (struct rt_trie_node *) l; struct tnode *p = node_parent_rcu(c); if (!p) return NULL; /* trie with just one leaf */ return leaf_walk_rcu(p, c); } static struct leaf *trie_leafindex(struct trie *t, int index) { struct leaf *l = trie_firstleaf(t); while (l && index-- > 0) l = trie_nextleaf(l); return l; } /* * Caller must hold RTNL. */ int fib_table_flush(struct fib_table *tb) { struct trie *t = (struct trie *) tb->tb_data; struct leaf *l, *ll = NULL; int found = 0; for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) { found += trie_flush_leaf(l); if (ll && hlist_empty(&ll->list)) trie_leaf_remove(t, ll); ll = l; } if (ll && hlist_empty(&ll->list)) trie_leaf_remove(t, ll); pr_debug("trie_flush found=%d\n", found); return found; } void fib_free_table(struct fib_table *tb) { kfree(tb); } static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) { int i, s_i; struct fib_alias *fa; __be32 xkey = htonl(key); s_i = cb->args[5]; i = 0; /* rcu_read_lock is held by the caller */ list_for_each_entry_rcu(fa, fah, fa_list) { if (i < s_i) { i++; continue; } if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, tb->tb_id, fa->fa_type, xkey, plen, fa->fa_tos, fa->fa_info, NLM_F_MULTI) < 0) { cb->args[5] = i; return -1; } i++; } cb->args[5] = i; return skb->len; } static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) { struct leaf_info *li; struct hlist_node *node; int i, s_i; s_i = cb->args[4]; i = 0; /* rcu_read_lock is held by the caller */ hlist_for_each_entry_rcu(li, node, &l->list, hlist) { if (i < s_i) { i++; continue; } if (i > s_i) cb->args[5] = 0; if (list_empty(&li->falh)) continue; if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) { cb->args[4] = i; return -1; } i++; } cb->args[4] = i; return skb->len; } int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb) { struct leaf *l; struct trie *t = (struct trie *) tb->tb_data; t_key key = cb->args[2]; int count = cb->args[3]; rcu_read_lock(); /* Dump starting at last key. * Note: 0.0.0.0/0 (ie default) is first key.
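 * (cb->args[2] holds the last dumped key and cb->args[3] the running leaf count, so an interrupted netlink dump resumes by key and falls back to the slower index scan only if that leaf has since vanished.)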
 */ if (count == 0) l = trie_firstleaf(t); else { /* Normally, continue from last key, but if that is missing * fall back to a slow rescan */ l = fib_find_node(t, key); if (!l) l = trie_leafindex(t, count); } while (l) { cb->args[2] = l->key; if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { cb->args[3] = count; rcu_read_unlock(); return -1; } ++count; l = trie_nextleaf(l); memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0])); } cb->args[3] = count; rcu_read_unlock(); return skb->len; } void __init fib_trie_init(void) { fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias), 0, SLAB_PANIC, NULL); trie_leaf_kmem = kmem_cache_create("ip_fib_trie", max(sizeof(struct leaf), sizeof(struct leaf_info)), 0, SLAB_PANIC, NULL); } struct fib_table *fib_trie_table(u32 id) { struct fib_table *tb; struct trie *t; tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie), GFP_KERNEL); if (tb == NULL) return NULL; tb->tb_id = id; tb->tb_default = -1; tb->tb_num_default = 0; t = (struct trie *) tb->tb_data; memset(t, 0, sizeof(*t)); return tb; } #ifdef CONFIG_PROC_FS /* Depth-first trie walk iterator */ struct fib_trie_iter { struct seq_net_private p; struct fib_table *tb; struct tnode *tnode; unsigned int index; unsigned int depth; }; static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter) { struct tnode *tn = iter->tnode; unsigned int cindex = iter->index; struct tnode *p; /* A single entry routing table */ if (!tn) return NULL; pr_debug("get_next iter={node=%p index=%d depth=%d}\n", iter->tnode, iter->index, iter->depth); rescan: while (cindex < (1<<tn->bits)) { struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex); if (n) { if (IS_LEAF(n)) { iter->tnode = tn; iter->index = cindex + 1; } else { /* push down one level */ iter->tnode = (struct tnode *) n; iter->index = 0; ++iter->depth; } return n; } ++cindex; } /* Current node exhausted, pop back up */ p = node_parent_rcu((struct rt_trie_node *)tn); if (p) { cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1; tn = p; --iter->depth; goto rescan; } /* got root?
*/ return NULL; } static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter, struct trie *t) { struct rt_trie_node *n; if (!t) return NULL; n = rcu_dereference(t->trie); if (!n) return NULL; if (IS_TNODE(n)) { iter->tnode = (struct tnode *) n; iter->index = 0; iter->depth = 1; } else { iter->tnode = NULL; iter->index = 0; iter->depth = 0; } return n; } static void trie_collect_stats(struct trie *t, struct trie_stat *s) { struct rt_trie_node *n; struct fib_trie_iter iter; memset(s, 0, sizeof(*s)); rcu_read_lock(); for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) { if (IS_LEAF(n)) { struct leaf *l = (struct leaf *)n; struct leaf_info *li; struct hlist_node *tmp; s->leaves++; s->totdepth += iter.depth; if (iter.depth > s->maxdepth) s->maxdepth = iter.depth; hlist_for_each_entry_rcu(li, tmp, &l->list, hlist) ++s->prefixes; } else { const struct tnode *tn = (const struct tnode *) n; int i; s->tnodes++; if (tn->bits < MAX_STAT_DEPTH) s->nodesizes[tn->bits]++; for (i = 0; i < (1<<tn->bits); i++) if (!tn->child[i]) s->nullpointers++; } } rcu_read_unlock(); } /* * This outputs /proc/net/fib_triestats */ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat) { unsigned int i, max, pointers, bytes, avdepth; if (stat->leaves) avdepth = stat->totdepth*100 / stat->leaves; else avdepth = 0; seq_printf(seq, "\tAver depth: %u.%02d\n", avdepth / 100, avdepth % 100); seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth); seq_printf(seq, "\tLeaves: %u\n", stat->leaves); bytes = sizeof(struct leaf) * stat->leaves; seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes); bytes += sizeof(struct leaf_info) * stat->prefixes; seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes); bytes += sizeof(struct tnode) * stat->tnodes; max = MAX_STAT_DEPTH; while (max > 0 && stat->nodesizes[max-1] == 0) max--; pointers = 0; for (i = 1; i <= max; i++) if (stat->nodesizes[i] != 0) { seq_printf(seq, " %u: %u", i, stat->nodesizes[i]); pointers += (1<<i) * stat->nodesizes[i]; } seq_putc(seq, '\n'); seq_printf(seq, "\tPointers: %u\n", pointers); bytes += sizeof(struct rt_trie_node *) * pointers; seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers); seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024); } #ifdef CONFIG_IP_FIB_TRIE_STATS static void trie_show_usage(struct seq_file *seq, const struct trie_use_stats *stats) { seq_printf(seq, "\nCounters:\n---------\n"); seq_printf(seq, "gets = %u\n", stats->gets); seq_printf(seq, "backtracks = %u\n", stats->backtrack); seq_printf(seq, "semantic match passed = %u\n", stats->semantic_match_passed); seq_printf(seq, "semantic match miss = %u\n", stats->semantic_match_miss); seq_printf(seq, "null node hit= %u\n", stats->null_node_hit); seq_printf(seq, "skipped node resize = %u\n\n", stats->resize_node_skipped); } #endif /* CONFIG_IP_FIB_TRIE_STATS */ static void fib_table_print(struct seq_file *seq, struct fib_table *tb) { if (tb->tb_id == RT_TABLE_LOCAL) seq_puts(seq, "Local:\n"); else if (tb->tb_id == RT_TABLE_MAIN) seq_puts(seq, "Main:\n"); else seq_printf(seq, "Id %d:\n", tb->tb_id); } static int fib_triestat_seq_show(struct seq_file *seq, void *v) { struct net *net = (struct net *)seq->private; unsigned int h; seq_printf(seq, "Basic info: size of leaf:" " %Zd bytes, size of tnode: %Zd bytes.\n", sizeof(struct leaf), sizeof(struct tnode)); for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct hlist_node *node; struct fib_table *tb; hlist_for_each_entry_rcu(tb, node, head, 
tb_hlist) { struct trie *t = (struct trie *) tb->tb_data; struct trie_stat stat; if (!t) continue; fib_table_print(seq, tb); trie_collect_stats(t, &stat); trie_show_stats(seq, &stat); #ifdef CONFIG_IP_FIB_TRIE_STATS trie_show_usage(seq, &t->stats); #endif } } return 0; } static int fib_triestat_seq_open(struct inode *inode, struct file *file) { return single_open_net(inode, file, fib_triestat_seq_show); } static const struct file_operations fib_triestat_fops = { .owner = THIS_MODULE, .open = fib_triestat_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release_net, }; static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos) { struct fib_trie_iter *iter = seq->private; struct net *net = seq_file_net(seq); loff_t idx = 0; unsigned int h; for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct hlist_node *node; struct fib_table *tb; hlist_for_each_entry_rcu(tb, node, head, tb_hlist) { struct rt_trie_node *n; for (n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); n; n = fib_trie_get_next(iter)) if (pos == idx++) { iter->tb = tb; return n; } } } return NULL; } static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return fib_trie_get_idx(seq, *pos); } static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_trie_iter *iter = seq->private; struct net *net = seq_file_net(seq); struct fib_table *tb = iter->tb; struct hlist_node *tb_node; unsigned int h; struct rt_trie_node *n; ++*pos; /* next node in same table */ n = fib_trie_get_next(iter); if (n) return n; /* walk rest of this hash chain */ h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) { tb = hlist_entry(tb_node, struct fib_table, tb_hlist); n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); if (n) goto found; } /* new hash chain */ while (++h < FIB_TABLE_HASHSZ) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) { n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); if (n) goto found; } } return NULL; found: iter->tb = tb; return n; } static void fib_trie_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static void seq_indent(struct seq_file *seq, int n) { while (n-- > 0) seq_puts(seq, " "); } static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s) { switch (s) { case RT_SCOPE_UNIVERSE: return "universe"; case RT_SCOPE_SITE: return "site"; case RT_SCOPE_LINK: return "link"; case RT_SCOPE_HOST: return "host"; case RT_SCOPE_NOWHERE: return "nowhere"; default: snprintf(buf, len, "scope=%d", s); return buf; } } static const char *const rtn_type_names[__RTN_MAX] = { [RTN_UNSPEC] = "UNSPEC", [RTN_UNICAST] = "UNICAST", [RTN_LOCAL] = "LOCAL", [RTN_BROADCAST] = "BROADCAST", [RTN_ANYCAST] = "ANYCAST", [RTN_MULTICAST] = "MULTICAST", [RTN_BLACKHOLE] = "BLACKHOLE", [RTN_UNREACHABLE] = "UNREACHABLE", [RTN_PROHIBIT] = "PROHIBIT", [RTN_THROW] = "THROW", [RTN_NAT] = "NAT", [RTN_XRESOLVE] = "XRESOLVE", }; static inline const char *rtn_type(char *buf, size_t len, unsigned int t) { if (t < __RTN_MAX && rtn_type_names[t]) return rtn_type_names[t]; snprintf(buf, len, "type %u", t); return buf; } /* Pretty print the trie */ static int fib_trie_seq_show(struct seq_file *seq, void *v) { const struct fib_trie_iter *iter = seq->private; struct rt_trie_node *n = v; if (!node_parent_rcu(n)) fib_table_print(seq, 
iter->tb); if (IS_TNODE(n)) { struct tnode *tn = (struct tnode *) n; __be32 prf = htonl(mask_pfx(tn->key, tn->pos)); seq_indent(seq, iter->depth-1); seq_printf(seq, " +-- %pI4/%d %d %d %d\n", &prf, tn->pos, tn->bits, tn->full_children, tn->empty_children); } else { struct leaf *l = (struct leaf *) n; struct leaf_info *li; struct hlist_node *node; __be32 val = htonl(l->key); seq_indent(seq, iter->depth); seq_printf(seq, " |-- %pI4\n", &val); hlist_for_each_entry_rcu(li, node, &l->list, hlist) { struct fib_alias *fa; list_for_each_entry_rcu(fa, &li->falh, fa_list) { char buf1[32], buf2[32]; seq_indent(seq, iter->depth+1); seq_printf(seq, " /%d %s %s", li->plen, rtn_scope(buf1, sizeof(buf1), fa->fa_info->fib_scope), rtn_type(buf2, sizeof(buf2), fa->fa_type)); if (fa->fa_tos) seq_printf(seq, " tos=%d", fa->fa_tos); seq_putc(seq, '\n'); } } } return 0; } static const struct seq_operations fib_trie_seq_ops = { .start = fib_trie_seq_start, .next = fib_trie_seq_next, .stop = fib_trie_seq_stop, .show = fib_trie_seq_show, }; static int fib_trie_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &fib_trie_seq_ops, sizeof(struct fib_trie_iter)); } static const struct file_operations fib_trie_fops = { .owner = THIS_MODULE, .open = fib_trie_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; struct fib_route_iter { struct seq_net_private p; struct trie *main_trie; loff_t pos; t_key key; }; static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos) { struct leaf *l = NULL; struct trie *t = iter->main_trie; /* use cached location of last found key */ if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key))) pos -= iter->pos; else { iter->pos = 0; l = trie_firstleaf(t); } while (l && pos-- > 0) { iter->pos++; l = trie_nextleaf(l); } if (l) iter->key = l->key; /* remember the leaf's key, as fib_route_seq_next() does; 'pos' has already been decremented past zero here */ else iter->pos = 0; /* forget it */ return l; } static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct fib_route_iter *iter = seq->private; struct fib_table *tb; rcu_read_lock(); tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN); if (!tb) return NULL; iter->main_trie = (struct trie *) tb->tb_data; if (*pos == 0) return SEQ_START_TOKEN; else return fib_route_get_idx(iter, *pos - 1); } static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_route_iter *iter = seq->private; struct leaf *l = v; ++*pos; if (v == SEQ_START_TOKEN) { iter->pos = 0; l = trie_firstleaf(iter->main_trie); } else { iter->pos++; l = trie_nextleaf(l); } if (l) iter->key = l->key; else iter->pos = 0; return l; } static void fib_route_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi) { unsigned int flags = 0; if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT) flags = RTF_REJECT; if (fi && fi->fib_nh->nh_gw) flags |= RTF_GATEWAY; if (mask == htonl(0xFFFFFFFF)) flags |= RTF_HOST; flags |= RTF_UP; return flags; } /* * This outputs /proc/net/route.
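 * Each line is tab-separated - Iface, Destination, Gateway, Flags, RefCnt, Use, Metric, Mask, MTU, Window, IRTT - with addresses printed as zero-padded hex in network byte order.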
 * The format of the file is not supposed to be changed * and needs to be the same as the fib_hash output to avoid breaking * legacy utilities */ static int fib_route_seq_show(struct seq_file *seq, void *v) { struct leaf *l = v; struct leaf_info *li; struct hlist_node *node; if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU" "\tWindow\tIRTT"); return 0; } hlist_for_each_entry_rcu(li, node, &l->list, hlist) { struct fib_alias *fa; __be32 mask, prefix; mask = inet_make_mask(li->plen); prefix = htonl(l->key); list_for_each_entry_rcu(fa, &li->falh, fa_list) { const struct fib_info *fi = fa->fa_info; unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi); int len; if (fa->fa_type == RTN_BROADCAST || fa->fa_type == RTN_MULTICAST) continue; if (fi) seq_printf(seq, "%s\t%08X\t%08X\t%04X\t%d\t%u\t" "%d\t%08X\t%d\t%u\t%u%n", fi->fib_dev ? fi->fib_dev->name : "*", prefix, fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority, mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0), fi->fib_window, fi->fib_rtt >> 3, &len); else seq_printf(seq, "*\t%08X\t%08X\t%04X\t%d\t%u\t" "%d\t%08X\t%d\t%u\t%u%n", prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len); seq_printf(seq, "%*s\n", 127 - len, ""); } } return 0; } static const struct seq_operations fib_route_seq_ops = { .start = fib_route_seq_start, .next = fib_route_seq_next, .stop = fib_route_seq_stop, .show = fib_route_seq_show, }; static int fib_route_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &fib_route_seq_ops, sizeof(struct fib_route_iter)); } static const struct file_operations fib_route_fops = { .owner = THIS_MODULE, .open = fib_route_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; int __net_init fib_proc_init(struct net *net) { if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops)) goto out1; if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO, &fib_triestat_fops)) goto out2; if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops)) goto out3; return 0; out3: proc_net_remove(net, "fib_triestat"); out2: proc_net_remove(net, "fib_trie"); out1: return -ENOMEM; } void __net_exit fib_proc_exit(struct net *net) { proc_net_remove(net, "fib_trie"); proc_net_remove(net, "fib_triestat"); proc_net_remove(net, "route"); } #endif /* CONFIG_PROC_FS */
gpl-2.0
Holong/kernel-zynq
arch/powerpc/oprofile/op_model_fsl_emb.c
2312
7125
/* * Freescale Embedded oprofile support, based on ppc64 oprofile support * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM * * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc * * Author: Andy Fleming * Maintainer: Kumar Gala <galak@kernel.crashing.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/oprofile.h> #include <linux/smp.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/reg_fsl_emb.h> #include <asm/page.h> #include <asm/pmc.h> #include <asm/oprofile_impl.h> static unsigned long reset_value[OP_MAX_COUNTER]; static int num_counters; static int oprofile_running; static inline u32 get_pmlca(int ctr) { u32 pmlca; switch (ctr) { case 0: pmlca = mfpmr(PMRN_PMLCA0); break; case 1: pmlca = mfpmr(PMRN_PMLCA1); break; case 2: pmlca = mfpmr(PMRN_PMLCA2); break; case 3: pmlca = mfpmr(PMRN_PMLCA3); break; case 4: pmlca = mfpmr(PMRN_PMLCA4); break; case 5: pmlca = mfpmr(PMRN_PMLCA5); break; default: panic("Bad ctr number\n"); } return pmlca; } static inline void set_pmlca(int ctr, u32 pmlca) { switch (ctr) { case 0: mtpmr(PMRN_PMLCA0, pmlca); break; case 1: mtpmr(PMRN_PMLCA1, pmlca); break; case 2: mtpmr(PMRN_PMLCA2, pmlca); break; case 3: mtpmr(PMRN_PMLCA3, pmlca); break; case 4: mtpmr(PMRN_PMLCA4, pmlca); break; case 5: mtpmr(PMRN_PMLCA5, pmlca); break; default: panic("Bad ctr number\n"); } } static inline unsigned int ctr_read(unsigned int i) { switch(i) { case 0: return mfpmr(PMRN_PMC0); case 1: return mfpmr(PMRN_PMC1); case 2: return mfpmr(PMRN_PMC2); case 3: return mfpmr(PMRN_PMC3); case 4: return mfpmr(PMRN_PMC4); case 5: return mfpmr(PMRN_PMC5); default: return 0; } } static inline void ctr_write(unsigned int i, unsigned int val) { switch(i) { case 0: mtpmr(PMRN_PMC0, val); break; case 1: mtpmr(PMRN_PMC1, val); break; case 2: mtpmr(PMRN_PMC2, val); break; case 3: mtpmr(PMRN_PMC3, val); break; case 4: mtpmr(PMRN_PMC4, val); break; case 5: mtpmr(PMRN_PMC5, val); break; default: break; } } static void init_pmc_stop(int ctr) { u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU | PMLCA_FCM1 | PMLCA_FCM0); u32 pmlcb = 0; switch (ctr) { case 0: mtpmr(PMRN_PMLCA0, pmlca); mtpmr(PMRN_PMLCB0, pmlcb); break; case 1: mtpmr(PMRN_PMLCA1, pmlca); mtpmr(PMRN_PMLCB1, pmlcb); break; case 2: mtpmr(PMRN_PMLCA2, pmlca); mtpmr(PMRN_PMLCB2, pmlcb); break; case 3: mtpmr(PMRN_PMLCA3, pmlca); mtpmr(PMRN_PMLCB3, pmlcb); break; case 4: mtpmr(PMRN_PMLCA4, pmlca); mtpmr(PMRN_PMLCB4, pmlcb); break; case 5: mtpmr(PMRN_PMLCA5, pmlca); mtpmr(PMRN_PMLCB5, pmlcb); break; default: panic("Bad ctr number!\n"); } } static void set_pmc_event(int ctr, int event) { u32 pmlca; pmlca = get_pmlca(ctr); pmlca = (pmlca & ~PMLCA_EVENT_MASK) | ((event << PMLCA_EVENT_SHIFT) & PMLCA_EVENT_MASK); set_pmlca(ctr, pmlca); } static void set_pmc_user_kernel(int ctr, int user, int kernel) { u32 pmlca; pmlca = get_pmlca(ctr); if(user) pmlca &= ~PMLCA_FCU; else pmlca |= PMLCA_FCU; if(kernel) pmlca &= ~PMLCA_FCS; else pmlca |= PMLCA_FCS; set_pmlca(ctr, pmlca); } static void set_pmc_marked(int ctr, int mark0, int mark1) { u32 pmlca = get_pmlca(ctr); if(mark0) pmlca &= ~PMLCA_FCM0; else pmlca |= PMLCA_FCM0; if(mark1) pmlca &= ~PMLCA_FCM1; else pmlca |= PMLCA_FCM1; set_pmlca(ctr, pmlca); } static void pmc_start_ctr(int ctr, int enable) { u32 pmlca = get_pmlca(ctr); pmlca &= 
~PMLCA_FC; if (enable) pmlca |= PMLCA_CE; else pmlca &= ~PMLCA_CE; set_pmlca(ctr, pmlca); } static void pmc_start_ctrs(int enable) { u32 pmgc0 = mfpmr(PMRN_PMGC0); pmgc0 &= ~PMGC0_FAC; pmgc0 |= PMGC0_FCECE; if (enable) pmgc0 |= PMGC0_PMIE; else pmgc0 &= ~PMGC0_PMIE; mtpmr(PMRN_PMGC0, pmgc0); } static void pmc_stop_ctrs(void) { u32 pmgc0 = mfpmr(PMRN_PMGC0); pmgc0 |= PMGC0_FAC; pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); mtpmr(PMRN_PMGC0, pmgc0); } static int fsl_emb_cpu_setup(struct op_counter_config *ctr) { int i; /* freeze all counters */ pmc_stop_ctrs(); for (i = 0;i < num_counters;i++) { init_pmc_stop(i); set_pmc_event(i, ctr[i].event); set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel); } return 0; } static int fsl_emb_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, int num_ctrs) { int i; num_counters = num_ctrs; /* Our counters count up, and "count" refers to * how much before the next interrupt, and we interrupt * on overflow. So we calculate the starting value * which will give us "count" until overflow. * Then we set the events on the enabled counters */ for (i = 0; i < num_counters; ++i) reset_value[i] = 0x80000000UL - ctr[i].count; return 0; } static int fsl_emb_start(struct op_counter_config *ctr) { int i; mtmsr(mfmsr() | MSR_PMM); for (i = 0; i < num_counters; ++i) { if (ctr[i].enabled) { ctr_write(i, reset_value[i]); /* Set each enabled counter to only * count when the Mark bit is *not* set */ set_pmc_marked(i, 1, 0); pmc_start_ctr(i, 1); } else { ctr_write(i, 0); /* Set the ctr to be stopped */ pmc_start_ctr(i, 0); } } /* Clear the freeze bit, and enable the interrupt. * The counters won't actually start until the rfi clears * the PMM bit */ pmc_start_ctrs(1); oprofile_running = 1; pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(), mfpmr(PMRN_PMGC0)); return 0; } static void fsl_emb_stop(void) { /* freeze counters */ pmc_stop_ctrs(); oprofile_running = 0; pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(), mfpmr(PMRN_PMGC0)); mb(); } static void fsl_emb_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) { unsigned long pc; int is_kernel; int val; int i; pc = regs->nip; is_kernel = is_kernel_addr(pc); for (i = 0; i < num_counters; ++i) { val = ctr_read(i); if (val < 0) { if (oprofile_running && ctr[i].enabled) { oprofile_add_ext_sample(pc, regs, i, is_kernel); ctr_write(i, reset_value[i]); } else { ctr_write(i, 0); } } } /* The freeze bit was set by the interrupt. */ /* Clear the freeze bit, and reenable the interrupt. The * counters won't actually start until the rfi clears the PMM * bit. The PMM bit should not be set until after the interrupt * is cleared to avoid it getting lost in some hypervisor * environments. */ mtmsr(mfmsr() | MSR_PMM); pmc_start_ctrs(1); } struct op_powerpc_model op_model_fsl_emb = { .reg_setup = fsl_emb_reg_setup, .cpu_setup = fsl_emb_cpu_setup, .start = fsl_emb_start, .stop = fsl_emb_stop, .handle_interrupt = fsl_emb_handle_interrupt, };
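/* Worked example (illustrative values, not from the original source): the reset_value computed in fsl_emb_reg_setup() above arms a counter to overflow after exactly 'count' events. With ctr[i].count = 100000, reset_value[i] = 0x80000000UL - 100000 = 0x7ffe7960; after 100000 increments the counter reaches 0x80000000, bit 31 is set, ctr_read() appears negative in fsl_emb_handle_interrupt(), a sample is recorded, and the counter is rewritten with reset_value[i] to re-arm it. */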
gpl-2.0
cosmoecho/linux_xenvnuma
drivers/staging/media/go7007/go7007-fw.c
2312
40751
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ /* * This file contains code to generate a firmware image for the GO7007SB * encoder. Much of the firmware is read verbatim from a file, but some of * it concerning bitrate control and other things that can be configured at * run-time are generated dynamically. Note that the format headers * generated here do not affect the functioning of the encoder; they are * merely parroted back to the host at the start of each frame. */ #include <linux/module.h> #include <linux/init.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/firmware.h> #include <linux/slab.h> #include <asm/byteorder.h> #include "go7007-priv.h" #define GO7007_FW_NAME "go7007/go7007tv.bin" /* Constants used in the source firmware image to describe code segments */ #define FLAG_MODE_MJPEG (1) #define FLAG_MODE_MPEG1 (1<<1) #define FLAG_MODE_MPEG2 (1<<2) #define FLAG_MODE_MPEG4 (1<<3) #define FLAG_MODE_H263 (1<<4) #define FLAG_MODE_ALL (FLAG_MODE_MJPEG | FLAG_MODE_MPEG1 | \ FLAG_MODE_MPEG2 | FLAG_MODE_MPEG4 | \ FLAG_MODE_H263) #define FLAG_SPECIAL (1<<8) #define SPECIAL_FRM_HEAD 0 #define SPECIAL_BRC_CTRL 1 #define SPECIAL_CONFIG 2 #define SPECIAL_SEQHEAD 3 #define SPECIAL_AV_SYNC 4 #define SPECIAL_FINAL 5 #define SPECIAL_AUDIO 6 #define SPECIAL_MODET 7 /* Little data class for creating MPEG headers bit-by-bit */ struct code_gen { unsigned char *p; /* destination */ u32 a; /* collects bits at the top of the variable */ int b; /* bit position of most recently-written bit */ int len; /* written out so far */ }; #define CODE_GEN(name, dest) struct code_gen name = { dest, 0, 32, 0 } #define CODE_ADD(name, val, length) do { \ name.b -= (length); \ name.a |= (val) << name.b; \ while (name.b <= 24) { \ *name.p = name.a >> 24; \ ++name.p; \ name.a <<= 8; \ name.b += 8; \ name.len += 8; \ } \ } while (0) #define CODE_LENGTH(name) (name.len + (32 - name.b)) /* Tables for creating the bitrate control data */ static const s16 converge_speed_ip[101] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 22, 23, 25, 27, 30, 32, 35, 38, 41, 45, 49, 53, 58, 63, 69, 76, 83, 91, 100 }; static const s16 converge_speed_ipb[101] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 17, 18, 19, 20, 22, 23, 25, 26, 28, 30, 32, 34, 37, 40, 42, 46, 49, 53, 57, 61, 66, 71, 77, 83, 90, 97, 106, 115, 125, 135, 147, 161, 175, 191, 209, 228, 249, 273, 300 }; static const s16 LAMBDA_table[4][101] = { { 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, 21, 21, 22, 
22, 22, 23, 23, 24, 24, 25, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 31, 31, 32, 32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 89, 90, 92, 94, 96 }, { 20, 20, 20, 21, 21, 21, 22, 22, 23, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 40, 40, 41, 42, 43, 43, 44, 45, 46, 47, 48, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 64, 65, 66, 67, 68, 70, 71, 72, 73, 75, 76, 78, 79, 80, 82, 83, 85, 86, 88, 90, 91, 93, 95, 96, 98, 100, 102, 103, 105, 107, 109, 111, 113, 115, 117, 120 }, { 24, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 40, 41, 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 50, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 69, 70, 71, 72, 74, 75, 76, 78, 79, 81, 82, 84, 85, 87, 88, 90, 92, 93, 95, 97, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 127, 129, 131, 134, 136, 138, 141, 144 }, { 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 38, 39, 40, 41, 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 50, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 69, 70, 71, 72, 74, 75, 76, 78, 79, 81, 82, 84, 85, 87, 88, 90, 92, 93, 95, 97, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 127, 129, 131, 134, 136, 139, 141, 144, 146, 149, 152, 154, 157, 160, 163, 166, 169, 172, 175, 178, 181, 185, 188, 192 } }; /* MPEG blank frame generation tables */ enum mpeg_frame_type { PFRAME, BFRAME_PRE, BFRAME_POST, BFRAME_BIDIR, BFRAME_EMPTY }; static const u32 addrinctab[33][2] = { { 0x01, 1 }, { 0x03, 3 }, { 0x02, 3 }, { 0x03, 4 }, { 0x02, 4 }, { 0x03, 5 }, { 0x02, 5 }, { 0x07, 7 }, { 0x06, 7 }, { 0x0b, 8 }, { 0x0a, 8 }, { 0x09, 8 }, { 0x08, 8 }, { 0x07, 8 }, { 0x06, 8 }, { 0x17, 10 }, { 0x16, 10 }, { 0x15, 10 }, { 0x14, 10 }, { 0x13, 10 }, { 0x12, 10 }, { 0x23, 11 }, { 0x22, 11 }, { 0x21, 11 }, { 0x20, 11 }, { 0x1f, 11 }, { 0x1e, 11 }, { 0x1d, 11 }, { 0x1c, 11 }, { 0x1b, 11 }, { 0x1a, 11 }, { 0x19, 11 }, { 0x18, 11 } }; /* Standard JPEG tables */ static const u8 default_intra_quant_table[] = { 8, 16, 19, 22, 26, 27, 29, 34, 16, 16, 22, 24, 27, 29, 34, 37, 19, 22, 26, 27, 29, 34, 34, 38, 22, 22, 26, 27, 29, 34, 37, 40, 22, 26, 27, 29, 32, 35, 40, 48, 26, 27, 29, 32, 35, 40, 48, 58, 26, 27, 29, 34, 38, 46, 56, 69, 27, 29, 35, 38, 46, 56, 69, 83 }; static const u8 bits_dc_luminance[] = { 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }; static const u8 val_dc_luminance[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; static const u8 bits_dc_chrominance[] = { 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }; static const u8 val_dc_chrominance[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; static const u8 bits_ac_luminance[] = { 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d }; static const u8 val_ac_luminance[] = { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 
0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; static const u8 bits_ac_chrominance[] = { 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 }; static const u8 val_ac_chrominance[] = { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; /* Zig-zag mapping for quant table * * OK, let's do this mapping on the actual table above so it doesn't have * to be done on the fly. */ static const int zz[64] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63 }; static int copy_packages(__le16 *dest, u16 *src, int pkg_cnt, int space) { int i, cnt = pkg_cnt * 32; if (space < cnt) return -1; for (i = 0; i < cnt; ++i) dest[i] = cpu_to_le16p(src + i); return cnt; } static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q) { int i, p = 0; buf[p++] = 0xff; buf[p++] = 0xd8; buf[p++] = 0xff; buf[p++] = 0xdb; buf[p++] = 0; buf[p++] = 2 + 65; buf[p++] = 0; buf[p++] = default_intra_quant_table[0]; for (i = 1; i < 64; ++i) /* buf[p++] = (default_intra_quant_table[i] * q) >> 3; */ buf[p++] = (default_intra_quant_table[zz[i]] * q) >> 3; buf[p++] = 0xff; buf[p++] = 0xc0; buf[p++] = 0; buf[p++] = 17; buf[p++] = 8; buf[p++] = go->height >> 8; buf[p++] = go->height & 0xff; buf[p++] = go->width >> 8; buf[p++] = go->width & 0xff; buf[p++] = 3; buf[p++] = 1; buf[p++] = 0x22; buf[p++] = 0; buf[p++] = 2; buf[p++] = 0x11; buf[p++] = 0; buf[p++] = 3; buf[p++] = 0x11; buf[p++] = 0; buf[p++] = 0xff; buf[p++] = 0xc4; buf[p++] = 418 >> 8; buf[p++] = 418 & 0xff; buf[p++] = 0x00; memcpy(buf + p, bits_dc_luminance + 1, 16); p += 16; memcpy(buf + p, val_dc_luminance, sizeof(val_dc_luminance)); p += sizeof(val_dc_luminance); buf[p++] = 0x01; memcpy(buf + p, bits_dc_chrominance + 1, 16); p += 16; memcpy(buf + p, val_dc_chrominance, sizeof(val_dc_chrominance)); p += sizeof(val_dc_chrominance); buf[p++] = 0x10; memcpy(buf + p, bits_ac_luminance + 1, 16); p += 16; memcpy(buf + p, val_ac_luminance, sizeof(val_ac_luminance)); p += sizeof(val_ac_luminance); buf[p++] = 0x11; memcpy(buf + p, 
bits_ac_chrominance + 1, 16); p += 16; memcpy(buf + p, val_ac_chrominance, sizeof(val_ac_chrominance)); p += sizeof(val_ac_chrominance); buf[p++] = 0xff; buf[p++] = 0xda; buf[p++] = 0; buf[p++] = 12; buf[p++] = 3; buf[p++] = 1; buf[p++] = 0x00; buf[p++] = 2; buf[p++] = 0x11; buf[p++] = 3; buf[p++] = 0x11; buf[p++] = 0; buf[p++] = 63; buf[p++] = 0; return p; } static int gen_mjpeghdr_to_package(struct go7007 *go, __le16 *code, int space) { u8 *buf; u16 mem = 0x3e00; unsigned int addr = 0x19; int size = 0, i, off = 0, chunk; buf = kzalloc(4096, GFP_KERNEL); if (buf == NULL) return -1; for (i = 1; i < 32; ++i) { mjpeg_frame_header(go, buf + size, i); size += 80; } chunk = mjpeg_frame_header(go, buf + size, 1); memmove(buf + size, buf + size + 80, chunk - 80); size += chunk - 80; for (i = 0; i < size; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > size) chunk = (size - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr++); mem = 0x3e00; } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } done: kfree(buf); return off; } static int mpeg1_frame_header(struct go7007 *go, unsigned char *buf, int modulo, int pict_struct, enum mpeg_frame_type frame) { int i, j, mb_code, mb_len; int rows = go->interlace_coding ? go->height / 32 : go->height / 16; CODE_GEN(c, buf + 6); switch (frame) { case PFRAME: mb_code = 0x1; mb_len = 3; break; case BFRAME_PRE: mb_code = 0x2; mb_len = 4; break; case BFRAME_POST: mb_code = 0x2; mb_len = 3; break; case BFRAME_BIDIR: mb_code = 0x2; mb_len = 2; break; default: /* keep the compiler happy */ mb_code = mb_len = 0; break; } CODE_ADD(c, frame == PFRAME ? 0x2 : 0x3, 13); CODE_ADD(c, 0xffff, 16); CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 0x7 : 0x4, 4); if (frame != PFRAME) CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 0x7 : 0x4, 4); else CODE_ADD(c, 0, 4); /* Is this supposed to be here?? */ CODE_ADD(c, 0, 3); /* What is this?? */ /* Byte-align with zeros */ j = 8 - (CODE_LENGTH(c) % 8); if (j != 8) CODE_ADD(c, 0, j); if (go->format == V4L2_PIX_FMT_MPEG2) { CODE_ADD(c, 0x1, 24); CODE_ADD(c, 0xb5, 8); CODE_ADD(c, 0x844, 12); CODE_ADD(c, frame == PFRAME ? 0xff : 0x44, 8); if (go->interlace_coding) { CODE_ADD(c, pict_struct, 4); if (go->dvd_mode) CODE_ADD(c, 0x000, 11); else CODE_ADD(c, 0x200, 11); } else { CODE_ADD(c, 0x3, 4); CODE_ADD(c, 0x20c, 11); } /* Byte-align with zeros */ j = 8 - (CODE_LENGTH(c) % 8); if (j != 8) CODE_ADD(c, 0, j); } for (i = 0; i < rows; ++i) { CODE_ADD(c, 1, 24); CODE_ADD(c, i + 1, 8); CODE_ADD(c, 0x2, 6); CODE_ADD(c, 0x1, 1); CODE_ADD(c, mb_code, mb_len); if (go->interlace_coding) { CODE_ADD(c, 0x1, 2); CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); } if (frame == BFRAME_BIDIR) { CODE_ADD(c, 0x3, 2); if (go->interlace_coding) CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); } CODE_ADD(c, 0x3, 2); for (j = (go->width >> 4) - 2; j >= 33; j -= 33) CODE_ADD(c, 0x8, 11); CODE_ADD(c, addrinctab[j][0], addrinctab[j][1]); CODE_ADD(c, mb_code, mb_len); if (go->interlace_coding) { CODE_ADD(c, 0x1, 2); CODE_ADD(c, pict_struct == 1 ? 0x0 : 0x1, 1); } if (frame == BFRAME_BIDIR) { CODE_ADD(c, 0x3, 2); if (go->interlace_coding) CODE_ADD(c, pict_struct == 1 ? 
0x0 : 0x1, 1); } CODE_ADD(c, 0x3, 2); /* Byte-align with zeros */ j = 8 - (CODE_LENGTH(c) % 8); if (j != 8) CODE_ADD(c, 0, j); } i = CODE_LENGTH(c) + 4 * 8; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x01; buf[5] = 0x00; return i; } static int mpeg1_sequence_header(struct go7007 *go, unsigned char *buf, int ext) { int i, aspect_ratio, picture_rate; CODE_GEN(c, buf + 6); if (go->format == V4L2_PIX_FMT_MPEG1) { switch (go->aspect_ratio) { case GO7007_RATIO_4_3: aspect_ratio = go->standard == GO7007_STD_NTSC ? 3 : 2; break; case GO7007_RATIO_16_9: aspect_ratio = go->standard == GO7007_STD_NTSC ? 5 : 4; break; default: aspect_ratio = 1; break; } } else { switch (go->aspect_ratio) { case GO7007_RATIO_4_3: aspect_ratio = 2; break; case GO7007_RATIO_16_9: aspect_ratio = 3; break; default: aspect_ratio = 1; break; } } switch (go->sensor_framerate) { case 24000: picture_rate = 1; break; case 24024: picture_rate = 2; break; case 25025: picture_rate = go->interlace_coding ? 6 : 3; break; case 30000: picture_rate = go->interlace_coding ? 7 : 4; break; case 30030: picture_rate = go->interlace_coding ? 8 : 5; break; default: picture_rate = 5; /* 30 fps seems like a reasonable default */ break; } CODE_ADD(c, go->width, 12); CODE_ADD(c, go->height, 12); CODE_ADD(c, aspect_ratio, 4); CODE_ADD(c, picture_rate, 4); CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 20000 : 0x3ffff, 18); CODE_ADD(c, 1, 1); CODE_ADD(c, go->format == V4L2_PIX_FMT_MPEG2 ? 112 : 20, 10); CODE_ADD(c, 0, 3); /* Byte-align with zeros */ i = 8 - (CODE_LENGTH(c) % 8); if (i != 8) CODE_ADD(c, 0, i); if (go->format == V4L2_PIX_FMT_MPEG2) { CODE_ADD(c, 0x1, 24); CODE_ADD(c, 0xb5, 8); CODE_ADD(c, 0x148, 12); if (go->interlace_coding) CODE_ADD(c, 0x20001, 20); else CODE_ADD(c, 0xa0001, 20); CODE_ADD(c, 0, 16); /* Byte-align with zeros */ i = 8 - (CODE_LENGTH(c) % 8); if (i != 8) CODE_ADD(c, 0, i); if (ext) { CODE_ADD(c, 0x1, 24); CODE_ADD(c, 0xb52, 12); CODE_ADD(c, go->standard == GO7007_STD_NTSC ? 
2 : 1, 3); CODE_ADD(c, 0x105, 9); CODE_ADD(c, 0x505, 16); CODE_ADD(c, go->width, 14); CODE_ADD(c, 1, 1); CODE_ADD(c, go->height, 14); /* Byte-align with zeros */ i = 8 - (CODE_LENGTH(c) % 8); if (i != 8) CODE_ADD(c, 0, i); } } i = CODE_LENGTH(c) + 4 * 8; buf[0] = i & 0xff; buf[1] = i >> 8; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x01; buf[5] = 0xb3; return i; } static int gen_mpeg1hdr_to_package(struct go7007 *go, __le16 *code, int space, int *framelen) { u8 *buf; u16 mem = 0x3e00; unsigned int addr = 0x19; int i, off = 0, chunk; buf = kzalloc(5120, GFP_KERNEL); if (buf == NULL) return -1; framelen[0] = mpeg1_frame_header(go, buf, 0, 1, PFRAME); if (go->interlace_coding) framelen[0] += mpeg1_frame_header(go, buf + framelen[0] / 8, 0, 2, PFRAME); buf[0] = framelen[0] & 0xff; buf[1] = framelen[0] >> 8; i = 368; framelen[1] = mpeg1_frame_header(go, buf + i, 0, 1, BFRAME_PRE); if (go->interlace_coding) framelen[1] += mpeg1_frame_header(go, buf + i + framelen[1] / 8, 0, 2, BFRAME_PRE); buf[i] = framelen[1] & 0xff; buf[i + 1] = framelen[1] >> 8; i += 1632; framelen[2] = mpeg1_frame_header(go, buf + i, 0, 1, BFRAME_POST); if (go->interlace_coding) framelen[2] += mpeg1_frame_header(go, buf + i + framelen[2] / 8, 0, 2, BFRAME_POST); buf[i] = framelen[2] & 0xff; buf[i + 1] = framelen[2] >> 8; i += 1432; framelen[3] = mpeg1_frame_header(go, buf + i, 0, 1, BFRAME_BIDIR); if (go->interlace_coding) framelen[3] += mpeg1_frame_header(go, buf + i + framelen[3] / 8, 0, 2, BFRAME_BIDIR); buf[i] = framelen[3] & 0xff; buf[i + 1] = framelen[3] >> 8; i += 1632 + 16; mpeg1_sequence_header(go, buf + i, 0); i += 40; for (i = 0; i < 5120; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > 5120) chunk = (5120 - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr); if (mem + chunk == 0x4000) { mem = 0x3e00; ++addr; } } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } done: kfree(buf); return off; } static int vti_bitlen(struct go7007 *go) { unsigned int i, max_time_incr = go->sensor_framerate / go->fps_scale; for (i = 31; (max_time_incr & ((1 << i) - 1)) == max_time_incr; --i); return i + 1; } static int mpeg4_frame_header(struct go7007 *go, unsigned char *buf, int modulo, enum mpeg_frame_type frame) { int i; CODE_GEN(c, buf + 6); int mb_count = (go->width >> 4) * (go->height >> 4); CODE_ADD(c, frame == PFRAME ? 
0x1 : 0x2, 2); if (modulo) CODE_ADD(c, 0x1, 1); CODE_ADD(c, 0x1, 2); CODE_ADD(c, 0, vti_bitlen(go)); CODE_ADD(c, 0x3, 2); if (frame == PFRAME) CODE_ADD(c, 0, 1); CODE_ADD(c, 0xc, 11); if (frame != PFRAME) CODE_ADD(c, 0x4, 3); if (frame != BFRAME_EMPTY) { for (i = 0; i < mb_count; ++i) { switch (frame) { case PFRAME: CODE_ADD(c, 0x1, 1); break; case BFRAME_PRE: CODE_ADD(c, 0x47, 8); break; case BFRAME_POST: CODE_ADD(c, 0x27, 7); break; case BFRAME_BIDIR: CODE_ADD(c, 0x5f, 8); break; case BFRAME_EMPTY: /* keep compiler quiet */ break; } } } /* Byte-align with a zero followed by ones */ i = 8 - (CODE_LENGTH(c) % 8); CODE_ADD(c, 0, 1); CODE_ADD(c, (1 << (i - 1)) - 1, i - 1); i = CODE_LENGTH(c) + 4 * 8; buf[0] = i & 0xff; buf[1] = i >> 8; buf[2] = 0x00; buf[3] = 0x00; buf[4] = 0x01; buf[5] = 0xb6; return i; } static int mpeg4_sequence_header(struct go7007 *go, unsigned char *buf, int ext) { const unsigned char head[] = { 0x00, 0x00, 0x01, 0xb0, go->pali, 0x00, 0x00, 0x01, 0xb5, 0x09, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20, }; int i, aspect_ratio; int fps = go->sensor_framerate / go->fps_scale; CODE_GEN(c, buf + 2 + sizeof(head)); switch (go->aspect_ratio) { case GO7007_RATIO_4_3: aspect_ratio = go->standard == GO7007_STD_NTSC ? 3 : 2; break; case GO7007_RATIO_16_9: aspect_ratio = go->standard == GO7007_STD_NTSC ? 5 : 4; break; default: aspect_ratio = 1; break; } memcpy(buf + 2, head, sizeof(head)); CODE_ADD(c, 0x191, 17); CODE_ADD(c, aspect_ratio, 4); CODE_ADD(c, 0x1, 4); CODE_ADD(c, fps, 16); CODE_ADD(c, 0x3, 2); CODE_ADD(c, 1001, vti_bitlen(go)); CODE_ADD(c, 1, 1); CODE_ADD(c, go->width, 13); CODE_ADD(c, 1, 1); CODE_ADD(c, go->height, 13); CODE_ADD(c, 0x2830, 14); /* Byte-align */ i = 8 - (CODE_LENGTH(c) % 8); CODE_ADD(c, 0, 1); CODE_ADD(c, (1 << (i - 1)) - 1, i - 1); i = CODE_LENGTH(c) + sizeof(head) * 8; buf[0] = i & 0xff; buf[1] = i >> 8; return i; } static int gen_mpeg4hdr_to_package(struct go7007 *go, __le16 *code, int space, int *framelen) { u8 *buf; u16 mem = 0x3e00; unsigned int addr = 0x19; int i, off = 0, chunk; buf = kzalloc(5120, GFP_KERNEL); if (buf == NULL) return -1; framelen[0] = mpeg4_frame_header(go, buf, 0, PFRAME); i = 368; framelen[1] = mpeg4_frame_header(go, buf + i, 0, BFRAME_PRE); i += 1632; framelen[2] = mpeg4_frame_header(go, buf + i, 0, BFRAME_POST); i += 1432; framelen[3] = mpeg4_frame_header(go, buf + i, 0, BFRAME_BIDIR); i += 1632; mpeg4_frame_header(go, buf + i, 0, BFRAME_EMPTY); i += 16; mpeg4_sequence_header(go, buf + i, 0); i += 40; for (i = 0; i < 5120; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > 5120) chunk = (5120 - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr); if (mem + chunk == 0x4000) { mem = 0x3e00; ++addr; } } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } mem = 0x3e00; addr = go->ipb ? 
0x14f9 : 0x0af9; memset(buf, 0, 5120); framelen[4] = mpeg4_frame_header(go, buf, 1, PFRAME); i = 368; framelen[5] = mpeg4_frame_header(go, buf + i, 1, BFRAME_PRE); i += 1632; framelen[6] = mpeg4_frame_header(go, buf + i, 1, BFRAME_POST); i += 1432; framelen[7] = mpeg4_frame_header(go, buf + i, 1, BFRAME_BIDIR); i += 1632; mpeg4_frame_header(go, buf + i, 1, BFRAME_EMPTY); i += 16; for (i = 0; i < 5120; i += chunk * 2) { if (space - off < 32) { off = -1; goto done; } code[off + 1] = __cpu_to_le16(0x8000 | mem); chunk = 28; if (mem + chunk > 0x4000) chunk = 0x4000 - mem; if (i + 2 * chunk > 5120) chunk = (5120 - i) / 2; if (chunk < 28) { code[off] = __cpu_to_le16(0x4000 | chunk); code[off + 31] = __cpu_to_le16(addr); if (mem + chunk == 0x4000) { mem = 0x3e00; ++addr; } } else { code[off] = __cpu_to_le16(0x1000 | 28); code[off + 31] = 0; mem += 28; } memcpy(&code[off + 2], buf + i, chunk * 2); off += 32; } done: kfree(buf); return off; } static int brctrl_to_package(struct go7007 *go, __le16 *code, int space, int *framelen) { int converge_speed = 0; int lambda = (go->format == V4L2_PIX_FMT_MJPEG || go->dvd_mode) ? 100 : 0; int peak_rate = 6 * go->bitrate / 5; int vbv_buffer = go->format == V4L2_PIX_FMT_MJPEG ? go->bitrate : (go->dvd_mode ? 900000 : peak_rate); int fps = go->sensor_framerate / go->fps_scale; int q = 0; /* Bizarre math below depends on rounding errors in division */ u32 sgop_expt_addr = go->bitrate / 32 * (go->ipb ? 3 : 1) * 1001 / fps; u32 sgop_peak_addr = peak_rate / 32 * 1001 / fps; u32 total_expt_addr = go->bitrate / 32 * 1000 / fps * (fps / 1000); u32 vbv_alert_addr = vbv_buffer * 3 / (4 * 32); u32 cplx[] = { q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, q > 0 ? sgop_expt_addr * q : 2 * go->width * go->height * (go->ipb ? 6 : 4) / 32, }; u32 calc_q = q > 0 ? q : cplx[0] / sgop_expt_addr; u16 pack[] = { 0x200e, 0x0000, 0xBF20, go->ipb ? converge_speed_ipb[converge_speed] : converge_speed_ip[converge_speed], 0xBF21, go->ipb ? 2 : 0, 0xBF22, go->ipb ? LAMBDA_table[0][lambda / 2 + 50] : 32767, 0xBF23, go->ipb ? LAMBDA_table[1][lambda] : 32767, 0xBF24, 32767, 0xBF25, lambda > 99 ? 32767 : LAMBDA_table[3][lambda], 0xBF26, sgop_expt_addr & 0x0000FFFF, 0xBF27, sgop_expt_addr >> 16, 0xBF28, sgop_peak_addr & 0x0000FFFF, 0xBF29, sgop_peak_addr >> 16, 0xBF2A, vbv_alert_addr & 0x0000FFFF, 0xBF2B, vbv_alert_addr >> 16, 0xBF2C, 0, 0xBF2D, 0, 0, 0, 0x200e, 0x0000, 0xBF2E, vbv_alert_addr & 0x0000FFFF, 0xBF2F, vbv_alert_addr >> 16, 0xBF30, cplx[0] & 0x0000FFFF, 0xBF31, cplx[0] >> 16, 0xBF32, cplx[1] & 0x0000FFFF, 0xBF33, cplx[1] >> 16, 0xBF34, cplx[2] & 0x0000FFFF, 0xBF35, cplx[2] >> 16, 0xBF36, cplx[3] & 0x0000FFFF, 0xBF37, cplx[3] >> 16, 0xBF38, 0, 0xBF39, 0, 0xBF3A, total_expt_addr & 0x0000FFFF, 0xBF3B, total_expt_addr >> 16, 0, 0, 0x200e, 0x0000, 0xBF3C, total_expt_addr & 0x0000FFFF, 0xBF3D, total_expt_addr >> 16, 0xBF3E, 0, 0xBF3F, 0, 0xBF48, 0, 0xBF49, 0, 0xBF4A, calc_q < 4 ? 4 : (calc_q > 124 ? 
124 : calc_q), 0xBF4B, 4, 0xBF4C, 0, 0xBF4D, 0, 0xBF4E, 0, 0xBF4F, 0, 0xBF50, 0, 0xBF51, 0, 0, 0, 0x200e, 0x0000, 0xBF40, sgop_expt_addr & 0x0000FFFF, 0xBF41, sgop_expt_addr >> 16, 0xBF42, 0, 0xBF43, 0, 0xBF44, 0, 0xBF45, 0, 0xBF46, (go->width >> 4) * (go->height >> 4), 0xBF47, 0, 0xBF64, 0, 0xBF65, 0, 0xBF18, framelen[4], 0xBF19, framelen[5], 0xBF1A, framelen[6], 0xBF1B, framelen[7], 0, 0, #if 0 /* Remove once we don't care about matching */ 0x200e, 0x0000, 0xBF56, 4, 0xBF57, 0, 0xBF58, 5, 0xBF59, 0, 0xBF5A, 6, 0xBF5B, 0, 0xBF5C, 8, 0xBF5D, 0, 0xBF5E, 1, 0xBF5F, 0, 0xBF60, 1, 0xBF61, 0, 0xBF62, 0, 0xBF63, 0, 0, 0, #else 0x2008, 0x0000, 0xBF56, 4, 0xBF57, 0, 0xBF58, 5, 0xBF59, 0, 0xBF5A, 6, 0xBF5B, 0, 0xBF5C, 8, 0xBF5D, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, #endif 0x200e, 0x0000, 0xBF10, 0, 0xBF11, 0, 0xBF12, 0, 0xBF13, 0, 0xBF14, 0, 0xBF15, 0, 0xBF16, 0, 0xBF17, 0, 0xBF7E, 0, 0xBF7F, 1, 0xBF52, framelen[0], 0xBF53, framelen[1], 0xBF54, framelen[2], 0xBF55, framelen[3], 0, 0, }; return copy_packages(code, pack, 6, space); } static int config_package(struct go7007 *go, __le16 *code, int space) { int fps = go->sensor_framerate / go->fps_scale / 1000; int rows = go->interlace_coding ? go->height / 32 : go->height / 16; int brc_window_size = fps; int q_min = 2, q_max = 31; int THACCoeffSet0 = 0; u16 pack[] = { 0x200e, 0x0000, 0xc002, 0x14b4, 0xc003, 0x28b4, 0xc004, 0x3c5a, 0xdc05, 0x2a77, 0xc6c3, go->format == V4L2_PIX_FMT_MPEG4 ? 0 : (go->format == V4L2_PIX_FMT_H263 ? 0 : 1), 0xc680, go->format == V4L2_PIX_FMT_MPEG4 ? 0xf1 : (go->format == V4L2_PIX_FMT_H263 ? 0x61 : 0xd3), 0xc780, 0x0140, 0xe009, 0x0001, 0xc60f, 0x0008, 0xd4ff, 0x0002, 0xe403, 2340, 0xe406, 75, 0xd411, 0x0001, 0xd410, 0xa1d6, 0x0001, 0x2801, 0x200d, 0x0000, 0xe402, 0x018b, 0xe401, 0x8b01, 0xd472, (go->board_info->sensor_flags & GO7007_SENSOR_TV) && (!go->interlace_coding) ? 0x01b0 : 0x0170, 0xd475, (go->board_info->sensor_flags & GO7007_SENSOR_TV) && (!go->interlace_coding) ? 0x0008 : 0x0009, 0xc404, go->interlace_coding ? 0x44 : (go->format == V4L2_PIX_FMT_MPEG4 ? 0x11 : (go->format == V4L2_PIX_FMT_MPEG1 ? 0x02 : (go->format == V4L2_PIX_FMT_MPEG2 ? 0x04 : (go->format == V4L2_PIX_FMT_H263 ? 0x08 : 0x20)))), 0xbf0a, (go->format == V4L2_PIX_FMT_MPEG4 ? 8 : (go->format == V4L2_PIX_FMT_MPEG1 ? 1 : (go->format == V4L2_PIX_FMT_MPEG2 ? 2 : (go->format == V4L2_PIX_FMT_H263 ? 4 : 16)))) | ((go->repeat_seqhead ? 1 : 0) << 6) | ((go->dvd_mode ? 1 : 0) << 9) | ((go->gop_header_enable ? 1 : 0) << 10), 0xbf0b, 0, 0xdd5a, go->ipb ? 0x14 : 0x0a, 0xbf0c, 0, 0xbf0d, 0, 0xc683, THACCoeffSet0, 0xc40a, (go->width << 4) | rows, 0xe01a, go->board_info->hpi_buffer_cap, 0, 0, 0, 0, 0x2008, 0, 0xe402, 0x88, 0xe401, 0x8f01, 0xbf6a, 0, 0xbf6b, 0, 0xbf6c, 0, 0xbf6d, 0, 0xbf6e, 0, 0xbf6f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x200e, 0, 0xbf66, brc_window_size, 0xbf67, 0, 0xbf68, q_min, 0xbf69, q_max, 0xbfe0, 0, 0xbfe1, 0, 0xbfe2, 0, 0xbfe3, go->ipb ? 3 : 1, 0xc031, go->board_info->sensor_flags & GO7007_SENSOR_VBI ? 1 : 0, 0xc01c, 0x1f, 0xdd8c, 0x15, 0xdd94, 0x15, 0xdd88, go->ipb ? 0x1401 : 0x0a01, 0xdd90, go->ipb ? 0x1401 : 0x0a01, 0, 0, 0x200e, 0, 0xbfe4, 0, 0xbfe5, 0, 0xbfe6, 0, 0xbfe7, fps << 8, 0xbfe8, 0x3a00, 0xbfe9, 0, 0xbfea, 0, 0xbfeb, 0, 0xbfec, (go->interlace_coding ? 1 << 15 : 0) | (go->modet_enable ? 0xa : 0) | (go->board_info->sensor_flags & GO7007_SENSOR_VBI ? 1 : 0), 0xbfed, 0, 0xbfee, 0, 0xbfef, 0, 0xbff0, go->board_info->sensor_flags & GO7007_SENSOR_TV ? 
0xf060 : 0xb060, 0xbff1, 0, 0, 0, }; return copy_packages(code, pack, 5, space); } static int seqhead_to_package(struct go7007 *go, __le16 *code, int space, int (*sequence_header_func)(struct go7007 *go, unsigned char *buf, int ext)) { int vop_time_increment_bitlength = vti_bitlen(go); int fps = go->sensor_framerate / go->fps_scale * (go->interlace_coding ? 2 : 1); unsigned char buf[40] = { }; int len = sequence_header_func(go, buf, 1); u16 pack[] = { 0x2006, 0, 0xbf08, fps, 0xbf09, 0, 0xbff2, vop_time_increment_bitlength, 0xbff3, (1 << vop_time_increment_bitlength) - 1, 0xbfe6, 0, 0xbfe7, (fps / 1000) << 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x2007, 0, 0xc800, buf[2] << 8 | buf[3], 0xc801, buf[4] << 8 | buf[5], 0xc802, buf[6] << 8 | buf[7], 0xc803, buf[8] << 8 | buf[9], 0xc406, 64, 0xc407, len - 64, 0xc61b, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x200e, 0, 0xc808, buf[10] << 8 | buf[11], 0xc809, buf[12] << 8 | buf[13], 0xc80a, buf[14] << 8 | buf[15], 0xc80b, buf[16] << 8 | buf[17], 0xc80c, buf[18] << 8 | buf[19], 0xc80d, buf[20] << 8 | buf[21], 0xc80e, buf[22] << 8 | buf[23], 0xc80f, buf[24] << 8 | buf[25], 0xc810, buf[26] << 8 | buf[27], 0xc811, buf[28] << 8 | buf[29], 0xc812, buf[30] << 8 | buf[31], 0xc813, buf[32] << 8 | buf[33], 0xc814, buf[34] << 8 | buf[35], 0xc815, buf[36] << 8 | buf[37], 0, 0, 0, 0, 0, 0, }; return copy_packages(code, pack, 3, space); } static int relative_prime(int big, int little) { int remainder; while (little != 0) { remainder = big % little; big = little; little = remainder; } return big; } static int avsync_to_package(struct go7007 *go, __le16 *code, int space) { int arate = go->board_info->audio_rate * 1001 * go->fps_scale; int ratio = arate / go->sensor_framerate; int adjratio = ratio * 215 / 100; int rprime = relative_prime(go->sensor_framerate, arate % go->sensor_framerate); int f1 = (arate % go->sensor_framerate) / rprime; int f2 = (go->sensor_framerate - arate % go->sensor_framerate) / rprime; u16 pack[] = { 0x200e, 0, 0xbf98, (u16)((-adjratio) & 0xffff), 0xbf99, (u16)((-adjratio) >> 16), 0xbf92, 0, 0xbf93, 0, 0xbff4, f1 > f2 ? f1 : f2, 0xbff5, f1 < f2 ? f1 : f2, 0xbff6, f1 < f2 ? ratio : ratio + 1, 0xbff7, f1 > f2 ? ratio : ratio + 1, 0xbff8, 0, 0xbff9, 0, 0xbffa, adjratio & 0xffff, 0xbffb, adjratio >> 16, 0xbf94, 0, 0xbf95, 0, 0, 0, }; return copy_packages(code, pack, 1, space); } static int final_package(struct go7007 *go, __le16 *code, int space) { int rows = go->interlace_coding ? go->height / 32 : go->height / 16; u16 pack[] = { 0x8000, 0, 0, 0, 0, 0, 0, 2, ((go->board_info->sensor_flags & GO7007_SENSOR_TV) && (!go->interlace_coding) ? (1 << 14) | (1 << 9) : 0) | ((go->encoder_subsample ? 1 : 0) << 8) | (go->board_info->sensor_flags & GO7007_SENSOR_CONFIG_MASK), ((go->encoder_v_halve ? 1 : 0) << 14) | (go->encoder_v_halve ? rows << 9 : rows << 8) | (go->encoder_h_halve ? 1 << 6 : 0) | (go->encoder_h_halve ? go->width >> 3 : go->width >> 4), (1 << 15) | (go->encoder_v_offset << 6) | (1 << 7) | (go->encoder_h_offset >> 2), (1 << 6), 0, 0, ((go->fps_scale - 1) << 8) | (go->board_info->sensor_flags & GO7007_SENSOR_TV ? (1 << 7) : 0) | 0x41, go->ipb ? 0xd4c : 0x36b, (rows << 8) | (go->width >> 4), go->format == V4L2_PIX_FMT_MPEG4 ? 0x0404 : 0, (1 << 15) | ((go->interlace_coding ? 1 : 0) << 13) | ((go->closed_gop ? 1 : 0) << 12) | ((go->format == V4L2_PIX_FMT_MPEG4 ? 1 : 0) << 11) | /* (1 << 9) | */ ((go->ipb ? 3 : 0) << 7) | ((go->modet_enable ? 1 : 0) << 2) | ((go->dvd_mode ? 
1 : 0) << 1) | 1, (go->format == V4L2_PIX_FMT_MPEG1 ? 0x89a0 : (go->format == V4L2_PIX_FMT_MPEG2 ? 0x89a0 : (go->format == V4L2_PIX_FMT_MJPEG ? 0x89a0 : (go->format == V4L2_PIX_FMT_MPEG4 ? 0x8920 : (go->format == V4L2_PIX_FMT_H263 ? 0x8920 : 0))))), go->ipb ? 0x1f15 : 0x1f0b, go->ipb ? 0x0015 : 0x000b, go->ipb ? 0xa800 : 0x5800, 0xffff, 0x0020 + 0x034b * 0, 0x0020 + 0x034b * 1, 0x0020 + 0x034b * 2, 0x0020 + 0x034b * 3, 0x0020 + 0x034b * 4, 0x0020 + 0x034b * 5, go->ipb ? (go->gop_size / 3) : go->gop_size, (go->height >> 4) * (go->width >> 4) * 110 / 100, }; return copy_packages(code, pack, 1, space); } static int audio_to_package(struct go7007 *go, __le16 *code, int space) { int clock_config = ((go->board_info->audio_flags & GO7007_AUDIO_I2S_MASTER ? 1 : 0) << 11) | ((go->board_info->audio_flags & GO7007_AUDIO_OKI_MODE ? 1 : 0) << 8) | (((go->board_info->audio_bclk_div / 4) - 1) << 4) | (go->board_info->audio_main_div - 1); u16 pack[] = { 0x200d, 0, 0x9002, 0, 0x9002, 0, 0x9031, 0, 0x9032, 0, 0x9033, 0, 0x9034, 0, 0x9035, 0, 0x9036, 0, 0x9037, 0, 0x9040, 0, 0x9000, clock_config, 0x9001, (go->board_info->audio_flags & 0xffff) | (1 << 9), 0x9000, ((go->board_info->audio_flags & GO7007_AUDIO_I2S_MASTER ? 1 : 0) << 10) | clock_config, 0, 0, 0, 0, 0x2005, 0, 0x9041, 0, 0x9042, 256, 0x9043, 0, 0x9044, 16, 0x9045, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; return copy_packages(code, pack, 2, space); } static int modet_to_package(struct go7007 *go, __le16 *code, int space) { int ret, mb, i, addr, cnt = 0; u16 pack[32]; u16 thresholds[] = { 0x200e, 0, 0xbf82, go->modet[0].pixel_threshold, 0xbf83, go->modet[1].pixel_threshold, 0xbf84, go->modet[2].pixel_threshold, 0xbf85, go->modet[3].pixel_threshold, 0xbf86, go->modet[0].motion_threshold, 0xbf87, go->modet[1].motion_threshold, 0xbf88, go->modet[2].motion_threshold, 0xbf89, go->modet[3].motion_threshold, 0xbf8a, go->modet[0].mb_threshold, 0xbf8b, go->modet[1].mb_threshold, 0xbf8c, go->modet[2].mb_threshold, 0xbf8d, go->modet[3].mb_threshold, 0xbf8e, 0, 0xbf8f, 0, 0, 0, }; ret = copy_packages(code, thresholds, 1, space); if (ret < 0) return -1; cnt += ret; addr = 0xbac0; memset(pack, 0, 64); i = 0; for (mb = 0; mb < 1624; ++mb) { pack[i * 2 + 3] <<= 2; pack[i * 2 + 3] |= go->modet_map[mb]; if (mb % 8 != 7) continue; pack[i * 2 + 2] = addr++; ++i; if (i == 10 || mb == 1623) { pack[0] = 0x2000 | i; ret = copy_packages(code + cnt, pack, 1, space - cnt); if (ret < 0) return -1; cnt += ret; i = 0; memset(pack, 0, 64); } pack[i * 2 + 3] = 0; } memset(pack, 0, 64); i = 0; for (addr = 0xbb90; addr < 0xbbfa; ++addr) { pack[i * 2 + 2] = addr; pack[i * 2 + 3] = 0; ++i; if (i == 10 || addr == 0xbbf9) { pack[0] = 0x2000 | i; ret = copy_packages(code + cnt, pack, 1, space - cnt); if (ret < 0) return -1; cnt += ret; i = 0; memset(pack, 0, 64); } } return cnt; } static int do_special(struct go7007 *go, u16 type, __le16 *code, int space, int *framelen) { switch (type) { case SPECIAL_FRM_HEAD: switch (go->format) { case V4L2_PIX_FMT_MJPEG: return gen_mjpeghdr_to_package(go, code, space); case V4L2_PIX_FMT_MPEG1: case V4L2_PIX_FMT_MPEG2: return gen_mpeg1hdr_to_package(go, code, space, framelen); case V4L2_PIX_FMT_MPEG4: return gen_mpeg4hdr_to_package(go, code, space, framelen); } case SPECIAL_BRC_CTRL: return brctrl_to_package(go, code, space, framelen); case SPECIAL_CONFIG: return config_package(go, code, space); case SPECIAL_SEQHEAD: switch (go->format) { case V4L2_PIX_FMT_MPEG1: case V4L2_PIX_FMT_MPEG2: return seqhead_to_package(go, code, 
space, mpeg1_sequence_header); case V4L2_PIX_FMT_MPEG4: return seqhead_to_package(go, code, space, mpeg4_sequence_header); default: return 0; } case SPECIAL_AV_SYNC: return avsync_to_package(go, code, space); case SPECIAL_FINAL: return final_package(go, code, space); case SPECIAL_AUDIO: return audio_to_package(go, code, space); case SPECIAL_MODET: return modet_to_package(go, code, space); } dev_err(go->dev, "firmware file contains unsupported feature %04x\n", type); return -1; } int go7007_construct_fw_image(struct go7007 *go, u8 **fw, int *fwlen) { const struct firmware *fw_entry; __le16 *code, *src; int framelen[8] = { }; /* holds the lengths of empty frame templates */ int codespace = 64 * 1024, i = 0, srclen, chunk_len, chunk_flags; int mode_flag; int ret; switch (go->format) { case V4L2_PIX_FMT_MJPEG: mode_flag = FLAG_MODE_MJPEG; break; case V4L2_PIX_FMT_MPEG1: mode_flag = FLAG_MODE_MPEG1; break; case V4L2_PIX_FMT_MPEG2: mode_flag = FLAG_MODE_MPEG2; break; case V4L2_PIX_FMT_MPEG4: mode_flag = FLAG_MODE_MPEG4; break; default: return -1; } if (request_firmware(&fw_entry, GO7007_FW_NAME, go->dev)) { dev_err(go->dev, "unable to load firmware from file \"%s\"\n", GO7007_FW_NAME); return -1; } code = kzalloc(codespace * 2, GFP_KERNEL); if (code == NULL) goto fw_failed; src = (__le16 *)fw_entry->data; srclen = fw_entry->size / 2; while (srclen >= 2) { chunk_flags = __le16_to_cpu(src[0]); chunk_len = __le16_to_cpu(src[1]); if (chunk_len + 2 > srclen) { dev_err(go->dev, "firmware file \"%s\" appears to be corrupted\n", GO7007_FW_NAME); goto fw_failed; } if (chunk_flags & mode_flag) { if (chunk_flags & FLAG_SPECIAL) { ret = do_special(go, __le16_to_cpu(src[2]), &code[i], codespace - i, framelen); if (ret < 0) { dev_err(go->dev, "insufficient memory for firmware construction\n"); goto fw_failed; } i += ret; } else { if (codespace - i < chunk_len) { dev_err(go->dev, "insufficient memory for firmware construction\n"); goto fw_failed; } memcpy(&code[i], &src[2], chunk_len * 2); i += chunk_len; } } srclen -= chunk_len + 2; src += chunk_len + 2; } release_firmware(fw_entry); *fw = (u8 *)code; *fwlen = i * 2; return 0; fw_failed: kfree(code); release_firmware(fw_entry); return -1; } MODULE_FIRMWARE(GO7007_FW_NAME);
gpl-2.0
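The CODE_GEN/CODE_ADD macros in the go7007 file above implement an MSB-first bit packer: bits collect at the top of a 32-bit accumulator and complete bytes are flushed to the destination as soon as eight or more are pending. As a worked illustration, here is a minimal standalone sketch in plain userspace C (not driver code) that reuses those macros to emit an MPEG sequence-header start code and then byte-align the stream the way the frame-header generators above do; the payload bits, buffer size, and the added u32 cast (for portable userspace C) are assumptions for the example, not part of the driver.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* bit-writer taken from the firmware builder above */
struct code_gen {
	unsigned char *p;	/* destination */
	u32 a;			/* collects bits at the top of the variable */
	int b;			/* bit position of most recently-written bit */
	int len;		/* written out so far */
};

#define CODE_GEN(name, dest) struct code_gen name = { dest, 0, 32, 0 }

#define CODE_ADD(name, val, length) do { \
	name.b -= (length); \
	name.a |= (u32)(val) << name.b; /* cast added for portable userspace C */ \
	while (name.b <= 24) { \
		*name.p = name.a >> 24; \
		++name.p; \
		name.a <<= 8; \
		name.b += 8; \
		name.len += 8; \
	} \
} while (0)

#define CODE_LENGTH(name) (name.len + (32 - name.b))

int main(void)
{
	unsigned char buf[8] = { 0 };
	int pad;

	CODE_GEN(c, buf);
	CODE_ADD(c, 0x000001, 24);	/* start-code prefix */
	CODE_ADD(c, 0xb3, 8);		/* sequence_header_code */
	CODE_ADD(c, 0x2, 3);		/* three payload bits, now unaligned */

	/* byte-align with zeros, exactly as mpeg1_frame_header() does */
	pad = 8 - (CODE_LENGTH(c) % 8);
	if (pad != 8)
		CODE_ADD(c, 0, pad);

	printf("%d bits: %02x %02x %02x %02x %02x\n", CODE_LENGTH(c),
	       buf[0], buf[1], buf[2], buf[3], buf[4]);
	return 0;	/* prints: 40 bits: 00 00 01 b3 40 */
}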
ntb-ch/linux
drivers/uio/uio_sercos3.c
2312
6249
/* sercos3: UIO driver for the Automata Sercos III PCI card Copyright (C) 2008 Linutronix GmbH Author: John Ogness <john.ogness@linutronix.de> This is a straight-forward UIO driver, where interrupts are disabled by the interrupt handler and re-enabled via a write to the UIO device by the userspace-part. The only part that may seem odd is the use of a logical OR when storing and restoring enabled interrupts. This is done because the userspace-part could directly modify the Interrupt Enable Register at any time. To reduce possible conflicts, the kernel driver uses a logical OR to make more controlled changes (rather than blindly overwriting previous values). Race conditions exist if the userspace-part directly modifies the Interrupt Enable Register while in operation. The consequences are that certain interrupts would fail to be enabled or disabled. For this reason, the userspace-part should only directly modify the Interrupt Enable Register at the beginning (to get things going). The userspace-part can safely disable interrupts at any time using a write to the UIO device. */ #include <linux/device.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/uio_driver.h> #include <linux/io.h> #include <linux/slab.h> /* ID's for SERCOS III PCI card (PLX 9030) */ #define SERCOS_SUB_VENDOR_ID 0x1971 #define SERCOS_SUB_SYSID_3530 0x3530 #define SERCOS_SUB_SYSID_3535 0x3535 #define SERCOS_SUB_SYSID_3780 0x3780 /* Interrupt Enable Register */ #define IER0_OFFSET 0x08 /* Interrupt Status Register */ #define ISR0_OFFSET 0x18 struct sercos3_priv { u32 ier0_cache; spinlock_t ier0_cache_lock; }; /* this function assumes ier0_cache_lock is locked! */ static void sercos3_disable_interrupts(struct uio_info *info, struct sercos3_priv *priv) { void __iomem *ier0 = info->mem[3].internal_addr + IER0_OFFSET; /* add enabled interrupts to cache */ priv->ier0_cache |= ioread32(ier0); /* disable interrupts */ iowrite32(0, ier0); } /* this function assumes ier0_cache_lock is locked! 
*/ static void sercos3_enable_interrupts(struct uio_info *info, struct sercos3_priv *priv) { void __iomem *ier0 = info->mem[3].internal_addr + IER0_OFFSET; /* restore previously enabled interrupts */ iowrite32(ioread32(ier0) | priv->ier0_cache, ier0); priv->ier0_cache = 0; } static irqreturn_t sercos3_handler(int irq, struct uio_info *info) { struct sercos3_priv *priv = info->priv; void __iomem *isr0 = info->mem[3].internal_addr + ISR0_OFFSET; void __iomem *ier0 = info->mem[3].internal_addr + IER0_OFFSET; if (!(ioread32(isr0) & ioread32(ier0))) return IRQ_NONE; spin_lock(&priv->ier0_cache_lock); sercos3_disable_interrupts(info, priv); spin_unlock(&priv->ier0_cache_lock); return IRQ_HANDLED; } static int sercos3_irqcontrol(struct uio_info *info, s32 irq_on) { struct sercos3_priv *priv = info->priv; spin_lock_irq(&priv->ier0_cache_lock); if (irq_on) sercos3_enable_interrupts(info, priv); else sercos3_disable_interrupts(info, priv); spin_unlock_irq(&priv->ier0_cache_lock); return 0; } static int sercos3_setup_iomem(struct pci_dev *dev, struct uio_info *info, int n, int pci_bar) { info->mem[n].addr = pci_resource_start(dev, pci_bar); if (!info->mem[n].addr) return -1; info->mem[n].internal_addr = ioremap(pci_resource_start(dev, pci_bar), pci_resource_len(dev, pci_bar)); if (!info->mem[n].internal_addr) return -1; info->mem[n].size = pci_resource_len(dev, pci_bar); info->mem[n].memtype = UIO_MEM_PHYS; return 0; } static int sercos3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct uio_info *info; struct sercos3_priv *priv; int i; info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; priv = kzalloc(sizeof(struct sercos3_priv), GFP_KERNEL); if (!priv) goto out_free; if (pci_enable_device(dev)) goto out_free_priv; if (pci_request_regions(dev, "sercos3")) goto out_disable; /* we only need PCI BAR's 0, 2, 3, 4, 5 */ if (sercos3_setup_iomem(dev, info, 0, 0)) goto out_unmap; if (sercos3_setup_iomem(dev, info, 1, 2)) goto out_unmap; if (sercos3_setup_iomem(dev, info, 2, 3)) goto out_unmap; if (sercos3_setup_iomem(dev, info, 3, 4)) goto out_unmap; if (sercos3_setup_iomem(dev, info, 4, 5)) goto out_unmap; spin_lock_init(&priv->ier0_cache_lock); info->priv = priv; info->name = "Sercos_III_PCI"; info->version = "0.0.1"; info->irq = dev->irq; info->irq_flags = IRQF_SHARED; info->handler = sercos3_handler; info->irqcontrol = sercos3_irqcontrol; pci_set_drvdata(dev, info); if (uio_register_device(&dev->dev, info)) goto out_unmap; return 0; out_unmap: for (i = 0; i < 5; i++) { if (info->mem[i].internal_addr) iounmap(info->mem[i].internal_addr); } pci_release_regions(dev); out_disable: pci_disable_device(dev); out_free_priv: kfree(priv); out_free: kfree(info); return -ENODEV; } static void sercos3_pci_remove(struct pci_dev *dev) { struct uio_info *info = pci_get_drvdata(dev); int i; uio_unregister_device(info); pci_release_regions(dev); pci_disable_device(dev); for (i = 0; i < 5; i++) { if (info->mem[i].internal_addr) iounmap(info->mem[i].internal_addr); } kfree(info->priv); kfree(info); } static struct pci_device_id sercos3_pci_ids[] = { { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030, .subvendor = SERCOS_SUB_VENDOR_ID, .subdevice = SERCOS_SUB_SYSID_3530, }, { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030, .subvendor = SERCOS_SUB_VENDOR_ID, .subdevice = SERCOS_SUB_SYSID_3535, }, { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030, .subvendor = SERCOS_SUB_VENDOR_ID, .subdevice = SERCOS_SUB_SYSID_3780, }, { 0, } }; static 
struct pci_driver sercos3_pci_driver = { .name = "sercos3", .id_table = sercos3_pci_ids, .probe = sercos3_pci_probe, .remove = sercos3_pci_remove, }; module_pci_driver(sercos3_pci_driver); MODULE_DESCRIPTION("UIO driver for the Automata Sercos III PCI card"); MODULE_AUTHOR("John Ogness <john.ogness@linutronix.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
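The sercos3 header above prescribes a userspace counterpart that programs the Interrupt Enable Register once at startup (through the mapped BAR) and afterwards re-enables interrupts only through writes to the UIO device. Below is a hedged sketch of that userspace loop, using only the standard UIO file protocol: a blocking read returns the 32-bit interrupt event count, and a written 32-bit value is passed to the driver's irqcontrol hook (here sercos3_irqcontrol). The /dev/uio0 path and the bare-bones error handling are assumptions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int32_t enable = 1;	/* non-zero -> sercos3_enable_interrupts() */
	uint32_t count;
	int fd = open("/dev/uio0", O_RDWR);	/* device number is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* assumes IER0 was programmed once via the mmap'ed mem[3] region
	 * beforehand, as the driver header above prescribes */
	for (;;) {
		/* routed to sercos3_irqcontrol(): restores the cached IER0 bits */
		if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
			break;
		/* blocks until sercos3_handler() has seen and masked an IRQ */
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			break;
		printf("interrupt event #%u\n", count);
		/* service ISR0 via the mapped BAR here, then loop to re-enable */
	}
	close(fd);
	return 0;
}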
Eliminater74/g3_kernel
arch/arm/mach-exynos/hotplug.c
4360
2688
/* linux arch/arm/mach-exynos4/hotplug.c * * Cloned from linux/arch/arm/mach-realview/hotplug.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/smp_plat.h> #include <mach/regs-pmu.h> extern volatile int pen_release; static inline void cpu_enter_lowpower(void) { unsigned int v; flush_cache_all(); asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, %3\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C), "Ir" (0x40) : "cc"); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C), "Ir" (0x40) : "cc"); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { for (;;) { /* make cpu1 be turned off at the next WFI command */ if (cpu == 1) __raw_writel(0, S5P_ARM_CORE1_CONFIGURATION); /* * here's the WFI */ asm(".word 0xe320f003\n" : : : "memory", "cc"); if (pen_release == cpu_logical_map(cpu)) { /* * OK, proper wakeup, we're done */ break; } /* * Getting here means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. */ (*spurious)++; } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shut down a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); platform_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shut down (it is still too special, * e.g. it handles the clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
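platform_cpu_die() in the hotplug file above is only ever reached through the generic CPU-hotplug machinery; nothing calls it directly. For orientation, here is a minimal sketch of how userspace triggers that path by offlining CPU1 through sysfs. The sysfs path is the standard one; a kernel built with CONFIG_HOTPLUG_CPU and root privileges are assumed.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/devices/system/cpu/cpu1/online", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* writing "0" requests offline: the scheduler migrates work away,
	 * then the dying CPU runs platform_cpu_die() ->
	 * cpu_enter_lowpower() -> WFI, as implemented above */
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}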
shakalaca/ASUS_ZenFone_ZE551KL
kernel/drivers/hwmon/fschmd.c
4360
41665
/* * fschmd.c * * Copyright (C) 2007 - 2009 Hans de Goede <hdegoede@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Merged Fujitsu Siemens hwmon driver, supporting the Poseidon, Hermes, * Scylla, Heracles, Heimdall, Hades and Syleus chips * * Based on the original 2.4 fscscy, 2.6 fscpos, 2.6 fscher and 2.6 * (candidate) fschmd drivers: * Copyright (C) 2006 Thilo Cestonaro * <thilo.cestonaro.external@fujitsu-siemens.com> * Copyright (C) 2004, 2005 Stefan Ott <stefan@desire.ch> * Copyright (C) 2003, 2004 Reinhard Nissl <rnissl@gmx.de> * Copyright (c) 2001 Martin Knoblauch <mkn@teraport.de, knobi@knobisoft.de> * Copyright (C) 2000 Hermann Jung <hej@odn.de> */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/dmi.h> #include <linux/fs.h> #include <linux/watchdog.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kref.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; /* Insmod parameters */ static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); enum chips { fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl }; /* * The FSCHMD registers and other defines */ /* chip identification */ #define FSCHMD_REG_IDENT_0 0x00 #define FSCHMD_REG_IDENT_1 0x01 #define FSCHMD_REG_IDENT_2 0x02 #define FSCHMD_REG_REVISION 0x03 /* global control and status */ #define FSCHMD_REG_EVENT_STATE 0x04 #define FSCHMD_REG_CONTROL 0x05 #define FSCHMD_CONTROL_ALERT_LED 0x01 /* watchdog */ static const u8 FSCHMD_REG_WDOG_CONTROL[7] = { 0x21, 0x21, 0x21, 0x21, 0x21, 0x28, 0x28 }; static const u8 FSCHMD_REG_WDOG_STATE[7] = { 0x23, 0x23, 0x23, 0x23, 0x23, 0x29, 0x29 }; static const u8 FSCHMD_REG_WDOG_PRESET[7] = { 0x28, 0x28, 0x28, 0x28, 0x28, 0x2a, 0x2a }; #define FSCHMD_WDOG_CONTROL_TRIGGER 0x10 #define FSCHMD_WDOG_CONTROL_STARTED 0x10 /* the same as trigger */ #define FSCHMD_WDOG_CONTROL_STOP 0x20 #define FSCHMD_WDOG_CONTROL_RESOLUTION 0x40 #define FSCHMD_WDOG_STATE_CARDRESET 0x02 /* voltages, weird order is to keep the same order as the old drivers */ static const u8 FSCHMD_REG_VOLT[7][6] = { { 0x45, 0x42, 0x48 }, /* pos */ { 0x45, 0x42, 0x48 }, /* her */ { 0x45, 0x42, 0x48 }, /* scy */ { 0x45, 0x42, 0x48 }, /* hrc */ { 0x45, 0x42, 0x48 }, /* hmd */ { 0x21, 0x20, 0x22 }, /* hds */ { 0x21, 0x20, 0x22, 0x23, 0x24, 0x25 }, /* syl */ }; static const int FSCHMD_NO_VOLT_SENSORS[7] = { 3, 3, 3, 3, 3, 3, 6 }; /* * minimum pwm at which the fan is driven (pwm can by increased depending on * the temp. Notice that for the scy some fans share there minimum speed. 
* Also notice that with the scy the sensor order is different than with the * other chips, this order was in the 2.4 driver and kept for consistency. */ static const u8 FSCHMD_REG_FAN_MIN[7][7] = { { 0x55, 0x65 }, /* pos */ { 0x55, 0x65, 0xb5 }, /* her */ { 0x65, 0x65, 0x55, 0xa5, 0x55, 0xa5 }, /* scy */ { 0x55, 0x65, 0xa5, 0xb5 }, /* hrc */ { 0x55, 0x65, 0xa5, 0xb5, 0xc5 }, /* hmd */ { 0x55, 0x65, 0xa5, 0xb5, 0xc5 }, /* hds */ { 0x54, 0x64, 0x74, 0x84, 0x94, 0xa4, 0xb4 }, /* syl */ }; /* actual fan speed */ static const u8 FSCHMD_REG_FAN_ACT[7][7] = { { 0x0e, 0x6b, 0xab }, /* pos */ { 0x0e, 0x6b, 0xbb }, /* her */ { 0x6b, 0x6c, 0x0e, 0xab, 0x5c, 0xbb }, /* scy */ { 0x0e, 0x6b, 0xab, 0xbb }, /* hrc */ { 0x5b, 0x6b, 0xab, 0xbb, 0xcb }, /* hmd */ { 0x5b, 0x6b, 0xab, 0xbb, 0xcb }, /* hds */ { 0x57, 0x67, 0x77, 0x87, 0x97, 0xa7, 0xb7 }, /* syl */ }; /* fan status registers */ static const u8 FSCHMD_REG_FAN_STATE[7][7] = { { 0x0d, 0x62, 0xa2 }, /* pos */ { 0x0d, 0x62, 0xb2 }, /* her */ { 0x62, 0x61, 0x0d, 0xa2, 0x52, 0xb2 }, /* scy */ { 0x0d, 0x62, 0xa2, 0xb2 }, /* hrc */ { 0x52, 0x62, 0xa2, 0xb2, 0xc2 }, /* hmd */ { 0x52, 0x62, 0xa2, 0xb2, 0xc2 }, /* hds */ { 0x50, 0x60, 0x70, 0x80, 0x90, 0xa0, 0xb0 }, /* syl */ }; /* fan ripple / divider registers */ static const u8 FSCHMD_REG_FAN_RIPPLE[7][7] = { { 0x0f, 0x6f, 0xaf }, /* pos */ { 0x0f, 0x6f, 0xbf }, /* her */ { 0x6f, 0x6f, 0x0f, 0xaf, 0x0f, 0xbf }, /* scy */ { 0x0f, 0x6f, 0xaf, 0xbf }, /* hrc */ { 0x5f, 0x6f, 0xaf, 0xbf, 0xcf }, /* hmd */ { 0x5f, 0x6f, 0xaf, 0xbf, 0xcf }, /* hds */ { 0x56, 0x66, 0x76, 0x86, 0x96, 0xa6, 0xb6 }, /* syl */ }; static const int FSCHMD_NO_FAN_SENSORS[7] = { 3, 3, 6, 4, 5, 5, 7 }; /* Fan status register bitmasks */ #define FSCHMD_FAN_ALARM 0x04 /* called fault by FSC! */ #define FSCHMD_FAN_NOT_PRESENT 0x08 #define FSCHMD_FAN_DISABLED 0x80 /* actual temperature registers */ static const u8 FSCHMD_REG_TEMP_ACT[7][11] = { { 0x64, 0x32, 0x35 }, /* pos */ { 0x64, 0x32, 0x35 }, /* her */ { 0x64, 0xD0, 0x32, 0x35 }, /* scy */ { 0x64, 0x32, 0x35 }, /* hrc */ { 0x70, 0x80, 0x90, 0xd0, 0xe0 }, /* hmd */ { 0x70, 0x80, 0x90, 0xd0, 0xe0 }, /* hds */ { 0x58, 0x68, 0x78, 0x88, 0x98, 0xa8, /* syl */ 0xb8, 0xc8, 0xd8, 0xe8, 0xf8 }, }; /* temperature state registers */ static const u8 FSCHMD_REG_TEMP_STATE[7][11] = { { 0x71, 0x81, 0x91 }, /* pos */ { 0x71, 0x81, 0x91 }, /* her */ { 0x71, 0xd1, 0x81, 0x91 }, /* scy */ { 0x71, 0x81, 0x91 }, /* hrc */ { 0x71, 0x81, 0x91, 0xd1, 0xe1 }, /* hmd */ { 0x71, 0x81, 0x91, 0xd1, 0xe1 }, /* hds */ { 0x59, 0x69, 0x79, 0x89, 0x99, 0xa9, /* syl */ 0xb9, 0xc9, 0xd9, 0xe9, 0xf9 }, }; /* * temperature high limit registers, FSC does not document these. Proven to be * there with field testing on the fscher and fschrc, already supported / used * in the fscscy 2.4 driver. FSC has confirmed that the fschmd has registers * at these addresses, but doesn't want to confirm they are the same as with * the fscher?? */ static const u8 FSCHMD_REG_TEMP_LIMIT[7][11] = { { 0, 0, 0 }, /* pos */ { 0x76, 0x86, 0x96 }, /* her */ { 0x76, 0xd6, 0x86, 0x96 }, /* scy */ { 0x76, 0x86, 0x96 }, /* hrc */ { 0x76, 0x86, 0x96, 0xd6, 0xe6 }, /* hmd */ { 0x76, 0x86, 0x96, 0xd6, 0xe6 }, /* hds */ { 0x5a, 0x6a, 0x7a, 0x8a, 0x9a, 0xaa, /* syl */ 0xba, 0xca, 0xda, 0xea, 0xfa }, }; /* * These were found through experimenting with an fscher, currently they are * not used, but we keep them around for future reference. 
* On the fscsyl AUTOP1 lives at 0x#c (so 0x5c for fan1, 0x6c for fan2, etc), * AUTOP2 lives at 0x#e, and 0x#1 is a bitmask defining which temps influence * the fan speed. * static const u8 FSCHER_REG_TEMP_AUTOP1[] = { 0x73, 0x83, 0x93 }; * static const u8 FSCHER_REG_TEMP_AUTOP2[] = { 0x75, 0x85, 0x95 }; */ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 }; /* temp status register bitmasks */ #define FSCHMD_TEMP_WORKING 0x01 #define FSCHMD_TEMP_ALERT 0x02 #define FSCHMD_TEMP_DISABLED 0x80 /* there only really is an alarm if the sensor is working and alert == 1 */ #define FSCHMD_TEMP_ALARM_MASK \ (FSCHMD_TEMP_WORKING | FSCHMD_TEMP_ALERT) /* * Functions declarations */ static int fschmd_probe(struct i2c_client *client, const struct i2c_device_id *id); static int fschmd_detect(struct i2c_client *client, struct i2c_board_info *info); static int fschmd_remove(struct i2c_client *client); static struct fschmd_data *fschmd_update_device(struct device *dev); /* * Driver data (common to all clients) */ static const struct i2c_device_id fschmd_id[] = { { "fscpos", fscpos }, { "fscher", fscher }, { "fscscy", fscscy }, { "fschrc", fschrc }, { "fschmd", fschmd }, { "fschds", fschds }, { "fscsyl", fscsyl }, { } }; MODULE_DEVICE_TABLE(i2c, fschmd_id); static struct i2c_driver fschmd_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "fschmd", }, .probe = fschmd_probe, .remove = fschmd_remove, .id_table = fschmd_id, .detect = fschmd_detect, .address_list = normal_i2c, }; /* * Client data (each client gets its own) */ struct fschmd_data { struct i2c_client *client; struct device *hwmon_dev; struct mutex update_lock; struct mutex watchdog_lock; struct list_head list; /* member of the watchdog_data_list */ struct kref kref; struct miscdevice watchdog_miscdev; enum chips kind; unsigned long watchdog_is_open; char watchdog_expect_close; char watchdog_name[10]; /* must be unique to avoid sysfs conflict */ char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* register values */ u8 revision; /* chip revision */ u8 global_control; /* global control register */ u8 watchdog_control; /* watchdog control register */ u8 watchdog_state; /* watchdog status register */ u8 watchdog_preset; /* watchdog counter preset on trigger val */ u8 volt[6]; /* voltage */ u8 temp_act[11]; /* temperature */ u8 temp_status[11]; /* status of sensor */ u8 temp_max[11]; /* high temp limit, notice: undocumented! */ u8 fan_act[7]; /* fans revolutions per second */ u8 fan_status[7]; /* fan status */ u8 fan_min[7]; /* fan min value for rps */ u8 fan_ripple[7]; /* divider for rps */ }; /* * Global variables to hold information read from special DMI tables, which are * available on FSC machines with an fscher or later chip. There is no need to * protect these with a lock as they are only modified from our attach function * which always gets called with the i2c-core lock held and never accessed * before the attach function is done with them. */ static int dmi_mult[6] = { 490, 200, 100, 100, 200, 100 }; static int dmi_offset[6] = { 0, 0, 0, 0, 0, 0 }; static int dmi_vref = -1; /* * Somewhat ugly :( global data pointer list with all fschmd devices, so that * we can find our device data as when using misc_register there is no other * method to get to ones device data from the open fop. 
*/ static LIST_HEAD(watchdog_data_list); /* Note this lock not only protect list access, but also data.kref access */ static DEFINE_MUTEX(watchdog_data_mutex); /* * Release our data struct when we're detached from the i2c client *and* all * references to our watchdog device are released */ static void fschmd_release_resources(struct kref *ref) { struct fschmd_data *data = container_of(ref, struct fschmd_data, kref); kfree(data); } /* * Sysfs attr show / store functions */ static ssize_t show_in_value(struct device *dev, struct device_attribute *devattr, char *buf) { const int max_reading[3] = { 14200, 6600, 3300 }; int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); if (data->kind == fscher || data->kind >= fschrc) return sprintf(buf, "%d\n", (data->volt[index] * dmi_vref * dmi_mult[index]) / 255 + dmi_offset[index]); else return sprintf(buf, "%d\n", (data->volt[index] * max_reading[index] + 128) / 255); } #define TEMP_FROM_REG(val) (((val) - 128) * 1000) static ssize_t show_temp_value(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_act[index])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[index])); } static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = dev_get_drvdata(dev); long v; int err; err = kstrtol(buf, 10, &v); if (err) return err; v = clamp_val(v / 1000, -128, 127) + 128; mutex_lock(&data->update_lock); i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_TEMP_LIMIT[data->kind][index], v); data->temp_max[index] = v; mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); /* bit 0 set means sensor working ok, so no fault! 
*/ if (data->temp_status[index] & FSCHMD_TEMP_WORKING) return sprintf(buf, "0\n"); else return sprintf(buf, "1\n"); } static ssize_t show_temp_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); if ((data->temp_status[index] & FSCHMD_TEMP_ALARM_MASK) == FSCHMD_TEMP_ALARM_MASK) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } #define RPM_FROM_REG(val) ((val) * 60) static ssize_t show_fan_value(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); return sprintf(buf, "%u\n", RPM_FROM_REG(data->fan_act[index])); } static ssize_t show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); /* bits 2..7 reserved => mask with 3 */ return sprintf(buf, "%d\n", 1 << (data->fan_ripple[index] & 3)); } static ssize_t store_fan_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { u8 reg; int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = dev_get_drvdata(dev); /* supported values: 2, 4, 8 */ unsigned long v; int err; err = kstrtoul(buf, 10, &v); if (err) return err; switch (v) { case 2: v = 1; break; case 4: v = 2; break; case 8: v = 3; break; default: dev_err(dev, "fan_div value %lu not supported. Choose one of 2, 4 or 8!\n", v); return -EINVAL; } mutex_lock(&data->update_lock); reg = i2c_smbus_read_byte_data(to_i2c_client(dev), FSCHMD_REG_FAN_RIPPLE[data->kind][index]); /* bits 2..7 reserved => mask with 0x03 */ reg &= ~0x03; reg |= v; i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_FAN_RIPPLE[data->kind][index], reg); data->fan_ripple[index] = reg; mutex_unlock(&data->update_lock); return count; } static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); if (data->fan_status[index] & FSCHMD_FAN_ALARM) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_fan_fault(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); if (data->fan_status[index] & FSCHMD_FAN_NOT_PRESENT) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t show_pwm_auto_point1_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = fschmd_update_device(dev); int val = data->fan_min[index]; /* 0 = allow turning off (except on the syl), 1-255 = 50-100% */ if (val || data->kind == fscsyl) val = val / 2 + 128; return sprintf(buf, "%d\n", val); } static ssize_t store_pwm_auto_point1_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct fschmd_data *data = dev_get_drvdata(dev); unsigned long v; int err; err = kstrtoul(buf, 10, &v); if (err) return err; /* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */ if (v || data->kind == fscsyl) { v = clamp_val(v, 128, 255); v = (v - 128) * 2 + 1; } mutex_lock(&data->update_lock); i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_FAN_MIN[data->kind][index], 
v); data->fan_min[index] = v; mutex_unlock(&data->update_lock); return count; } /* * The FSC hwmon family has the ability to force an attached alert led to flash * from software, we export this as an alert_led sysfs attr */ static ssize_t show_alert_led(struct device *dev, struct device_attribute *devattr, char *buf) { struct fschmd_data *data = fschmd_update_device(dev); if (data->global_control & FSCHMD_CONTROL_ALERT_LED) return sprintf(buf, "1\n"); else return sprintf(buf, "0\n"); } static ssize_t store_alert_led(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { u8 reg; struct fschmd_data *data = dev_get_drvdata(dev); unsigned long v; int err; err = kstrtoul(buf, 10, &v); if (err) return err; mutex_lock(&data->update_lock); reg = i2c_smbus_read_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL); if (v) reg |= FSCHMD_CONTROL_ALERT_LED; else reg &= ~FSCHMD_CONTROL_ALERT_LED; i2c_smbus_write_byte_data(to_i2c_client(dev), FSCHMD_REG_CONTROL, reg); data->global_control = reg; mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(alert_led, 0644, show_alert_led, store_alert_led); static struct sensor_device_attribute fschmd_attr[] = { SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0), SENSOR_ATTR(in1_input, 0444, show_in_value, NULL, 1), SENSOR_ATTR(in2_input, 0444, show_in_value, NULL, 2), SENSOR_ATTR(in3_input, 0444, show_in_value, NULL, 3), SENSOR_ATTR(in4_input, 0444, show_in_value, NULL, 4), SENSOR_ATTR(in5_input, 0444, show_in_value, NULL, 5), }; static struct sensor_device_attribute fschmd_temp_attr[] = { SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0), SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0), SENSOR_ATTR(temp1_fault, 0444, show_temp_fault, NULL, 0), SENSOR_ATTR(temp1_alarm, 0444, show_temp_alarm, NULL, 0), SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1), SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1), SENSOR_ATTR(temp2_fault, 0444, show_temp_fault, NULL, 1), SENSOR_ATTR(temp2_alarm, 0444, show_temp_alarm, NULL, 1), SENSOR_ATTR(temp3_input, 0444, show_temp_value, NULL, 2), SENSOR_ATTR(temp3_max, 0644, show_temp_max, store_temp_max, 2), SENSOR_ATTR(temp3_fault, 0444, show_temp_fault, NULL, 2), SENSOR_ATTR(temp3_alarm, 0444, show_temp_alarm, NULL, 2), SENSOR_ATTR(temp4_input, 0444, show_temp_value, NULL, 3), SENSOR_ATTR(temp4_max, 0644, show_temp_max, store_temp_max, 3), SENSOR_ATTR(temp4_fault, 0444, show_temp_fault, NULL, 3), SENSOR_ATTR(temp4_alarm, 0444, show_temp_alarm, NULL, 3), SENSOR_ATTR(temp5_input, 0444, show_temp_value, NULL, 4), SENSOR_ATTR(temp5_max, 0644, show_temp_max, store_temp_max, 4), SENSOR_ATTR(temp5_fault, 0444, show_temp_fault, NULL, 4), SENSOR_ATTR(temp5_alarm, 0444, show_temp_alarm, NULL, 4), SENSOR_ATTR(temp6_input, 0444, show_temp_value, NULL, 5), SENSOR_ATTR(temp6_max, 0644, show_temp_max, store_temp_max, 5), SENSOR_ATTR(temp6_fault, 0444, show_temp_fault, NULL, 5), SENSOR_ATTR(temp6_alarm, 0444, show_temp_alarm, NULL, 5), SENSOR_ATTR(temp7_input, 0444, show_temp_value, NULL, 6), SENSOR_ATTR(temp7_max, 0644, show_temp_max, store_temp_max, 6), SENSOR_ATTR(temp7_fault, 0444, show_temp_fault, NULL, 6), SENSOR_ATTR(temp7_alarm, 0444, show_temp_alarm, NULL, 6), SENSOR_ATTR(temp8_input, 0444, show_temp_value, NULL, 7), SENSOR_ATTR(temp8_max, 0644, show_temp_max, store_temp_max, 7), SENSOR_ATTR(temp8_fault, 0444, show_temp_fault, NULL, 7), SENSOR_ATTR(temp8_alarm, 0444, show_temp_alarm, NULL, 7), SENSOR_ATTR(temp9_input, 0444, show_temp_value, NULL, 8), 
SENSOR_ATTR(temp9_max, 0644, show_temp_max, store_temp_max, 8), SENSOR_ATTR(temp9_fault, 0444, show_temp_fault, NULL, 8), SENSOR_ATTR(temp9_alarm, 0444, show_temp_alarm, NULL, 8), SENSOR_ATTR(temp10_input, 0444, show_temp_value, NULL, 9), SENSOR_ATTR(temp10_max, 0644, show_temp_max, store_temp_max, 9), SENSOR_ATTR(temp10_fault, 0444, show_temp_fault, NULL, 9), SENSOR_ATTR(temp10_alarm, 0444, show_temp_alarm, NULL, 9), SENSOR_ATTR(temp11_input, 0444, show_temp_value, NULL, 10), SENSOR_ATTR(temp11_max, 0644, show_temp_max, store_temp_max, 10), SENSOR_ATTR(temp11_fault, 0444, show_temp_fault, NULL, 10), SENSOR_ATTR(temp11_alarm, 0444, show_temp_alarm, NULL, 10), }; static struct sensor_device_attribute fschmd_fan_attr[] = { SENSOR_ATTR(fan1_input, 0444, show_fan_value, NULL, 0), SENSOR_ATTR(fan1_div, 0644, show_fan_div, store_fan_div, 0), SENSOR_ATTR(fan1_alarm, 0444, show_fan_alarm, NULL, 0), SENSOR_ATTR(fan1_fault, 0444, show_fan_fault, NULL, 0), SENSOR_ATTR(pwm1_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 0), SENSOR_ATTR(fan2_input, 0444, show_fan_value, NULL, 1), SENSOR_ATTR(fan2_div, 0644, show_fan_div, store_fan_div, 1), SENSOR_ATTR(fan2_alarm, 0444, show_fan_alarm, NULL, 1), SENSOR_ATTR(fan2_fault, 0444, show_fan_fault, NULL, 1), SENSOR_ATTR(pwm2_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 1), SENSOR_ATTR(fan3_input, 0444, show_fan_value, NULL, 2), SENSOR_ATTR(fan3_div, 0644, show_fan_div, store_fan_div, 2), SENSOR_ATTR(fan3_alarm, 0444, show_fan_alarm, NULL, 2), SENSOR_ATTR(fan3_fault, 0444, show_fan_fault, NULL, 2), SENSOR_ATTR(pwm3_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 2), SENSOR_ATTR(fan4_input, 0444, show_fan_value, NULL, 3), SENSOR_ATTR(fan4_div, 0644, show_fan_div, store_fan_div, 3), SENSOR_ATTR(fan4_alarm, 0444, show_fan_alarm, NULL, 3), SENSOR_ATTR(fan4_fault, 0444, show_fan_fault, NULL, 3), SENSOR_ATTR(pwm4_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 3), SENSOR_ATTR(fan5_input, 0444, show_fan_value, NULL, 4), SENSOR_ATTR(fan5_div, 0644, show_fan_div, store_fan_div, 4), SENSOR_ATTR(fan5_alarm, 0444, show_fan_alarm, NULL, 4), SENSOR_ATTR(fan5_fault, 0444, show_fan_fault, NULL, 4), SENSOR_ATTR(pwm5_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 4), SENSOR_ATTR(fan6_input, 0444, show_fan_value, NULL, 5), SENSOR_ATTR(fan6_div, 0644, show_fan_div, store_fan_div, 5), SENSOR_ATTR(fan6_alarm, 0444, show_fan_alarm, NULL, 5), SENSOR_ATTR(fan6_fault, 0444, show_fan_fault, NULL, 5), SENSOR_ATTR(pwm6_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 5), SENSOR_ATTR(fan7_input, 0444, show_fan_value, NULL, 6), SENSOR_ATTR(fan7_div, 0644, show_fan_div, store_fan_div, 6), SENSOR_ATTR(fan7_alarm, 0444, show_fan_alarm, NULL, 6), SENSOR_ATTR(fan7_fault, 0444, show_fan_fault, NULL, 6), SENSOR_ATTR(pwm7_auto_point1_pwm, 0644, show_pwm_auto_point1_pwm, store_pwm_auto_point1_pwm, 6), }; /* * Watchdog routines */ static int watchdog_set_timeout(struct fschmd_data *data, int timeout) { int ret, resolution; int kind = data->kind + 1; /* 0-x array index -> 1-x module param */ /* 2 second or 60 second resolution? 
*/ if (timeout <= 510 || kind == fscpos || kind == fscscy) resolution = 2; else resolution = 60; if (timeout < resolution || timeout > (resolution * 255)) return -EINVAL; mutex_lock(&data->watchdog_lock); if (!data->client) { ret = -ENODEV; goto leave; } if (resolution == 2) data->watchdog_control &= ~FSCHMD_WDOG_CONTROL_RESOLUTION; else data->watchdog_control |= FSCHMD_WDOG_CONTROL_RESOLUTION; data->watchdog_preset = DIV_ROUND_UP(timeout, resolution); /* Write new timeout value */ i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_PRESET[data->kind], data->watchdog_preset); /* Write new control register, do not trigger! */ i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_CONTROL[data->kind], data->watchdog_control & ~FSCHMD_WDOG_CONTROL_TRIGGER); ret = data->watchdog_preset * resolution; leave: mutex_unlock(&data->watchdog_lock); return ret; } static int watchdog_get_timeout(struct fschmd_data *data) { int timeout; mutex_lock(&data->watchdog_lock); if (data->watchdog_control & FSCHMD_WDOG_CONTROL_RESOLUTION) timeout = data->watchdog_preset * 60; else timeout = data->watchdog_preset * 2; mutex_unlock(&data->watchdog_lock); return timeout; } static int watchdog_trigger(struct fschmd_data *data) { int ret = 0; mutex_lock(&data->watchdog_lock); if (!data->client) { ret = -ENODEV; goto leave; } data->watchdog_control |= FSCHMD_WDOG_CONTROL_TRIGGER; i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_CONTROL[data->kind], data->watchdog_control); leave: mutex_unlock(&data->watchdog_lock); return ret; } static int watchdog_stop(struct fschmd_data *data) { int ret = 0; mutex_lock(&data->watchdog_lock); if (!data->client) { ret = -ENODEV; goto leave; } data->watchdog_control &= ~FSCHMD_WDOG_CONTROL_STARTED; /* * Don't store the stop flag in our watchdog control register copy, as * it's a write-only bit (read always returns 0) */ i2c_smbus_write_byte_data(data->client, FSCHMD_REG_WDOG_CONTROL[data->kind], data->watchdog_control | FSCHMD_WDOG_CONTROL_STOP); leave: mutex_unlock(&data->watchdog_lock); return ret; } static int watchdog_open(struct inode *inode, struct file *filp) { struct fschmd_data *pos, *data = NULL; int watchdog_is_open; /* * We get called from drivers/char/misc.c with misc_mtx held, and we * call misc_register() from fschmd_probe() with watchdog_data_mutex * held, as misc_register() takes the misc_mtx lock, this is a possible * deadlock, so we use mutex_trylock here. 
*/ if (!mutex_trylock(&watchdog_data_mutex)) return -ERESTARTSYS; list_for_each_entry(pos, &watchdog_data_list, list) { if (pos->watchdog_miscdev.minor == iminor(inode)) { data = pos; break; } } /* Note we can never not have found data, so we don't check for this */ watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open); if (!watchdog_is_open) kref_get(&data->kref); mutex_unlock(&watchdog_data_mutex); if (watchdog_is_open) return -EBUSY; /* Start the watchdog */ watchdog_trigger(data); filp->private_data = data; return nonseekable_open(inode, filp); } static int watchdog_release(struct inode *inode, struct file *filp) { struct fschmd_data *data = filp->private_data; if (data->watchdog_expect_close) { watchdog_stop(data); data->watchdog_expect_close = 0; } else { watchdog_trigger(data); dev_crit(&data->client->dev, "unexpected close, not stopping watchdog!\n"); } clear_bit(0, &data->watchdog_is_open); mutex_lock(&watchdog_data_mutex); kref_put(&data->kref, fschmd_release_resources); mutex_unlock(&watchdog_data_mutex); return 0; } static ssize_t watchdog_write(struct file *filp, const char __user *buf, size_t count, loff_t *offset) { int ret; struct fschmd_data *data = filp->private_data; if (count) { if (!nowayout) { size_t i; /* Clear it in case it was set with a previous write */ data->watchdog_expect_close = 0; for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') data->watchdog_expect_close = 1; } } ret = watchdog_trigger(data); if (ret < 0) return ret; } return count; } static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_CARDRESET, .identity = "FSC watchdog" }; int i, ret = 0; struct fschmd_data *data = filp->private_data; switch (cmd) { case WDIOC_GETSUPPORT: ident.firmware_version = data->revision; if (!nowayout) ident.options |= WDIOF_MAGICCLOSE; if (copy_to_user((void __user *)arg, &ident, sizeof(ident))) ret = -EFAULT; break; case WDIOC_GETSTATUS: ret = put_user(0, (int __user *)arg); break; case WDIOC_GETBOOTSTATUS: if (data->watchdog_state & FSCHMD_WDOG_STATE_CARDRESET) ret = put_user(WDIOF_CARDRESET, (int __user *)arg); else ret = put_user(0, (int __user *)arg); break; case WDIOC_KEEPALIVE: ret = watchdog_trigger(data); break; case WDIOC_GETTIMEOUT: i = watchdog_get_timeout(data); ret = put_user(i, (int __user *)arg); break; case WDIOC_SETTIMEOUT: if (get_user(i, (int __user *)arg)) { ret = -EFAULT; break; } ret = watchdog_set_timeout(data, i); if (ret > 0) ret = put_user(ret, (int __user *)arg); break; case WDIOC_SETOPTIONS: if (get_user(i, (int __user *)arg)) { ret = -EFAULT; break; } if (i & WDIOS_DISABLECARD) ret = watchdog_stop(data); else if (i & WDIOS_ENABLECARD) ret = watchdog_trigger(data); else ret = -EINVAL; break; default: ret = -ENOTTY; } return ret; } static const struct file_operations watchdog_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = watchdog_open, .release = watchdog_release, .write = watchdog_write, .unlocked_ioctl = watchdog_ioctl, }; /* * Detect, register, unregister and update device functions */ /* * DMI decode routine to read voltage scaling factors from special DMI tables, * which are available on FSC machines with an fscher or later chip. 
*/ static void fschmd_dmi_decode(const struct dmi_header *header, void *dummy) { int i, mult[3] = { 0 }, offset[3] = { 0 }, vref = 0, found = 0; /* * dmi code ugliness, we get passed the address of the contents of * a complete DMI record, but in the form of a dmi_header pointer, in * reality this address holds header->length bytes of which the header * are the first 4 bytes */ u8 *dmi_data = (u8 *)header; /* We are looking for OEM-specific type 185 */ if (header->type != 185) return; /* * we are looking for what Siemens calls "subtype" 19, the subtype * is stored in byte 5 of the dmi block */ if (header->length < 5 || dmi_data[4] != 19) return; /* * After the subtype comes 1 unknown byte and then blocks of 5 bytes, * consisting of what Siemens calls an "Entity" number, followed by * 2 16-bit words in LSB first order */ for (i = 6; (i + 4) < header->length; i += 5) { /* entity 1 - 3: voltage multiplier and offset */ if (dmi_data[i] >= 1 && dmi_data[i] <= 3) { /* The order of our in* sensors differs from the DMI order */ const int shuffle[3] = { 1, 0, 2 }; int in = shuffle[dmi_data[i] - 1]; /* Check for twice the same entity */ if (found & (1 << in)) return; mult[in] = dmi_data[i + 1] | (dmi_data[i + 2] << 8); offset[in] = dmi_data[i + 3] | (dmi_data[i + 4] << 8); found |= 1 << in; } /* entity 7: reference voltage */ if (dmi_data[i] == 7) { /* Check for twice the same entity */ if (found & 0x08) return; vref = dmi_data[i + 1] | (dmi_data[i + 2] << 8); found |= 0x08; } } if (found == 0x0F) { for (i = 0; i < 3; i++) { dmi_mult[i] = mult[i] * 10; dmi_offset[i] = offset[i] * 10; } /* * According to the docs there should be separate dmi entries * for the mult's and offsets of in3-5 of the syl, but on * my test machine these are not present */ dmi_mult[3] = dmi_mult[2]; dmi_mult[4] = dmi_mult[1]; dmi_mult[5] = dmi_mult[2]; dmi_offset[3] = dmi_offset[2]; dmi_offset[4] = dmi_offset[1]; dmi_offset[5] = dmi_offset[2]; dmi_vref = vref; } } static int fschmd_detect(struct i2c_client *client, struct i2c_board_info *info) { enum chips kind; struct i2c_adapter *adapter = client->adapter; char id[4]; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Detect & Identify the chip */ id[0] = i2c_smbus_read_byte_data(client, FSCHMD_REG_IDENT_0); id[1] = i2c_smbus_read_byte_data(client, FSCHMD_REG_IDENT_1); id[2] = i2c_smbus_read_byte_data(client, FSCHMD_REG_IDENT_2); id[3] = '\0'; if (!strcmp(id, "PEG")) kind = fscpos; else if (!strcmp(id, "HER")) kind = fscher; else if (!strcmp(id, "SCY")) kind = fscscy; else if (!strcmp(id, "HRC")) kind = fschrc; else if (!strcmp(id, "HMD")) kind = fschmd; else if (!strcmp(id, "HDS")) kind = fschds; else if (!strcmp(id, "SYL")) kind = fscsyl; else return -ENODEV; strlcpy(info->type, fschmd_id[kind].name, I2C_NAME_SIZE); return 0; } static int fschmd_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct fschmd_data *data; const char * const names[7] = { "Poseidon", "Hermes", "Scylla", "Heracles", "Heimdall", "Hades", "Syleus" }; const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 }; int i, err; enum chips kind = id->driver_data; data = kzalloc(sizeof(struct fschmd_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); mutex_init(&data->update_lock); mutex_init(&data->watchdog_lock); INIT_LIST_HEAD(&data->list); kref_init(&data->kref); /* * Store client pointer in our data struct for watchdog usage * (where the client is found through a data ptr instead of the * other way around) */ data->client = 
client; data->kind = kind; if (kind == fscpos) { /* * The Poseidon has hardwired temp limits, fill these * in for the alarm resetting code */ data->temp_max[0] = 70 + 128; data->temp_max[1] = 50 + 128; data->temp_max[2] = 50 + 128; } /* Read the special DMI table for fscher and newer chips */ if ((kind == fscher || kind >= fschrc) && dmi_vref == -1) { dmi_walk(fschmd_dmi_decode, NULL); if (dmi_vref == -1) { dev_warn(&client->dev, "Couldn't get voltage scaling factors from " "BIOS DMI table, using builtin defaults\n"); dmi_vref = 33; } } /* Read in some never changing registers */ data->revision = i2c_smbus_read_byte_data(client, FSCHMD_REG_REVISION); data->global_control = i2c_smbus_read_byte_data(client, FSCHMD_REG_CONTROL); data->watchdog_control = i2c_smbus_read_byte_data(client, FSCHMD_REG_WDOG_CONTROL[data->kind]); data->watchdog_state = i2c_smbus_read_byte_data(client, FSCHMD_REG_WDOG_STATE[data->kind]); data->watchdog_preset = i2c_smbus_read_byte_data(client, FSCHMD_REG_WDOG_PRESET[data->kind]); err = device_create_file(&client->dev, &dev_attr_alert_led); if (err) goto exit_detach; for (i = 0; i < FSCHMD_NO_VOLT_SENSORS[data->kind]; i++) { err = device_create_file(&client->dev, &fschmd_attr[i].dev_attr); if (err) goto exit_detach; } for (i = 0; i < (FSCHMD_NO_TEMP_SENSORS[data->kind] * 4); i++) { /* Poseidon doesn't have TEMP_LIMIT registers */ if (kind == fscpos && fschmd_temp_attr[i].dev_attr.show == show_temp_max) continue; if (kind == fscsyl) { if (i % 4 == 0) data->temp_status[i / 4] = i2c_smbus_read_byte_data(client, FSCHMD_REG_TEMP_STATE [data->kind][i / 4]); if (data->temp_status[i / 4] & FSCHMD_TEMP_DISABLED) continue; } err = device_create_file(&client->dev, &fschmd_temp_attr[i].dev_attr); if (err) goto exit_detach; } for (i = 0; i < (FSCHMD_NO_FAN_SENSORS[data->kind] * 5); i++) { /* Poseidon doesn't have a FAN_MIN register for its 3rd fan */ if (kind == fscpos && !strcmp(fschmd_fan_attr[i].dev_attr.attr.name, "pwm3_auto_point1_pwm")) continue; if (kind == fscsyl) { if (i % 5 == 0) data->fan_status[i / 5] = i2c_smbus_read_byte_data(client, FSCHMD_REG_FAN_STATE [data->kind][i / 5]); if (data->fan_status[i / 5] & FSCHMD_FAN_DISABLED) continue; } err = device_create_file(&client->dev, &fschmd_fan_attr[i].dev_attr); if (err) goto exit_detach; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); data->hwmon_dev = NULL; goto exit_detach; } /* * We take the data_mutex lock early so that watchdog_open() cannot * run when misc_register() has completed, but we've not yet added * our data to the watchdog_data_list (and set the default timeout) */ mutex_lock(&watchdog_data_mutex); for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) { /* Register our watchdog part */ snprintf(data->watchdog_name, sizeof(data->watchdog_name), "watchdog%c", (i == 0) ? 
'\0' : ('0' + i)); data->watchdog_miscdev.name = data->watchdog_name; data->watchdog_miscdev.fops = &watchdog_fops; data->watchdog_miscdev.minor = watchdog_minors[i]; err = misc_register(&data->watchdog_miscdev); if (err == -EBUSY) continue; if (err) { data->watchdog_miscdev.minor = 0; dev_err(&client->dev, "Registering watchdog chardev: %d\n", err); break; } list_add(&data->list, &watchdog_data_list); watchdog_set_timeout(data, 60); dev_info(&client->dev, "Registered watchdog chardev major 10, minor: %d\n", watchdog_minors[i]); break; } if (i == ARRAY_SIZE(watchdog_minors)) { data->watchdog_miscdev.minor = 0; dev_warn(&client->dev, "Couldn't register watchdog chardev (due to no free minor)\n"); } mutex_unlock(&watchdog_data_mutex); dev_info(&client->dev, "Detected FSC %s chip, revision: %d\n", names[data->kind], (int) data->revision); return 0; exit_detach: fschmd_remove(client); /* will also free data for us */ return err; } static int fschmd_remove(struct i2c_client *client) { struct fschmd_data *data = i2c_get_clientdata(client); int i; /* Unregister the watchdog (if registered) */ if (data->watchdog_miscdev.minor) { misc_deregister(&data->watchdog_miscdev); if (data->watchdog_is_open) { dev_warn(&client->dev, "i2c client detached with watchdog open! " "Stopping watchdog.\n"); watchdog_stop(data); } mutex_lock(&watchdog_data_mutex); list_del(&data->list); mutex_unlock(&watchdog_data_mutex); /* Tell the watchdog code the client is gone */ mutex_lock(&data->watchdog_lock); data->client = NULL; mutex_unlock(&data->watchdog_lock); } /* * Check if registered in case we're called from fschmd_detect * to cleanup after an error */ if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); device_remove_file(&client->dev, &dev_attr_alert_led); for (i = 0; i < (FSCHMD_NO_VOLT_SENSORS[data->kind]); i++) device_remove_file(&client->dev, &fschmd_attr[i].dev_attr); for (i = 0; i < (FSCHMD_NO_TEMP_SENSORS[data->kind] * 4); i++) device_remove_file(&client->dev, &fschmd_temp_attr[i].dev_attr); for (i = 0; i < (FSCHMD_NO_FAN_SENSORS[data->kind] * 5); i++) device_remove_file(&client->dev, &fschmd_fan_attr[i].dev_attr); mutex_lock(&watchdog_data_mutex); kref_put(&data->kref, fschmd_release_resources); mutex_unlock(&watchdog_data_mutex); return 0; } static struct fschmd_data *fschmd_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct fschmd_data *data = i2c_get_clientdata(client); int i; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { for (i = 0; i < FSCHMD_NO_TEMP_SENSORS[data->kind]; i++) { data->temp_act[i] = i2c_smbus_read_byte_data(client, FSCHMD_REG_TEMP_ACT[data->kind][i]); data->temp_status[i] = i2c_smbus_read_byte_data(client, FSCHMD_REG_TEMP_STATE[data->kind][i]); /* The fscpos doesn't have TEMP_LIMIT registers */ if (FSCHMD_REG_TEMP_LIMIT[data->kind][i]) data->temp_max[i] = i2c_smbus_read_byte_data( client, FSCHMD_REG_TEMP_LIMIT[data->kind][i]); /* * reset alarm if the alarm condition is gone, * the chip doesn't do this itself */ if ((data->temp_status[i] & FSCHMD_TEMP_ALARM_MASK) == FSCHMD_TEMP_ALARM_MASK && data->temp_act[i] < data->temp_max[i]) i2c_smbus_write_byte_data(client, FSCHMD_REG_TEMP_STATE[data->kind][i], data->temp_status[i]); } for (i = 0; i < FSCHMD_NO_FAN_SENSORS[data->kind]; i++) { data->fan_act[i] = i2c_smbus_read_byte_data(client, FSCHMD_REG_FAN_ACT[data->kind][i]); data->fan_status[i] = i2c_smbus_read_byte_data(client, FSCHMD_REG_FAN_STATE[data->kind][i]); data->fan_ripple[i] = 
i2c_smbus_read_byte_data(client, FSCHMD_REG_FAN_RIPPLE[data->kind][i]); /* The fscpos third fan doesn't have a fan_min */ if (FSCHMD_REG_FAN_MIN[data->kind][i]) data->fan_min[i] = i2c_smbus_read_byte_data( client, FSCHMD_REG_FAN_MIN[data->kind][i]); /* reset fan status if speed is back to > 0 */ if ((data->fan_status[i] & FSCHMD_FAN_ALARM) && data->fan_act[i]) i2c_smbus_write_byte_data(client, FSCHMD_REG_FAN_STATE[data->kind][i], data->fan_status[i]); } for (i = 0; i < FSCHMD_NO_VOLT_SENSORS[data->kind]; i++) data->volt[i] = i2c_smbus_read_byte_data(client, FSCHMD_REG_VOLT[data->kind][i]); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(fschmd_driver); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("FSC Poseidon, Hermes, Scylla, Heracles, Heimdall, Hades " "and Syleus driver"); MODULE_LICENSE("GPL");
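A minimal user-space sketch (not part of the driver above, and assuming the watchdog got the default minor so it is reachable as /dev/watchdog) showing how the chardev registered in fschmd_probe() is driven through the standard WDIOC ioctls handled by watchdog_ioctl():

/* pet_fsc_watchdog.c - illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int i, fd, timeout = 60;

	fd = open("/dev/watchdog", O_WRONLY);	/* ends up in watchdog_open() */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* negotiate a timeout; the driver rounds it to its 2s/60s resolution
	 * and writes the effective value back through the pointer */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("effective timeout: %d s\n", timeout);

	/* keep the board alive a few times (watchdog_trigger() in the driver) */
	for (i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(1);
	}

	/* magic close: 'V' sets watchdog_expect_close, so the release
	 * actually stops the watchdog instead of warning */
	write(fd, "V", 1);
	close(fd);
	return 0;
}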
gpl-2.0
qltsar/g620s
drivers/pcmcia/sa1100_shannon.c
4616
2494
/* * drivers/pcmcia/sa1100_shannon.c * * PCMCIA implementation routines for Shannon * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/init.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <mach/shannon.h> #include <asm/irq.h> #include "sa1100_generic.h" static int shannon_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { /* All those are inputs */ GAFR &= ~(GPIO_GPIO(SHANNON_GPIO_EJECT_0) | GPIO_GPIO(SHANNON_GPIO_EJECT_1) | GPIO_GPIO(SHANNON_GPIO_RDY_0) | GPIO_GPIO(SHANNON_GPIO_RDY_1)); if (skt->nr == 0) { skt->stat[SOC_STAT_CD].gpio = SHANNON_GPIO_EJECT_0; skt->stat[SOC_STAT_CD].name = "PCMCIA_CD_0"; skt->stat[SOC_STAT_RDY].gpio = SHANNON_GPIO_RDY_0; skt->stat[SOC_STAT_RDY].name = "PCMCIA_RDY_0"; } else { skt->stat[SOC_STAT_CD].gpio = SHANNON_GPIO_EJECT_1; skt->stat[SOC_STAT_CD].name = "PCMCIA_CD_1"; skt->stat[SOC_STAT_RDY].gpio = SHANNON_GPIO_RDY_1; skt->stat[SOC_STAT_RDY].name = "PCMCIA_RDY_1"; } return 0; } static void shannon_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { switch (skt->nr) { case 0: state->bvd1 = 1; state->bvd2 = 1; state->vs_3v = 1; /* FIXME Can only apply 3.3V on Shannon. */ state->vs_Xv = 0; break; case 1: state->bvd1 = 1; state->bvd2 = 1; state->vs_3v = 1; /* FIXME Can only apply 3.3V on Shannon. */ state->vs_Xv = 0; break; } } static int shannon_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { switch (state->Vcc) { case 0: /* power off */ printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __func__); break; case 50: printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __func__); case 33: break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); return -1; } printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __func__); /* Silently ignore Vpp, output enable, speaker enable. */ return 0; } static struct pcmcia_low_level shannon_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = shannon_pcmcia_hw_init, .socket_state = shannon_pcmcia_socket_state, .configure_socket = shannon_pcmcia_configure_socket, }; int pcmcia_shannon_init(struct device *dev) { int ret = -ENODEV; if (machine_is_shannon()) ret = sa11xx_drv_pcmcia_probe(dev, &shannon_pcmcia_ops, 0, 2); return ret; }
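The Vcc values handled in shannon_pcmcia_configure_socket() are in tenths of a volt (33 = 3.3V, 50 = 5.0V), and the deliberate fall-through from case 50 into case 33 reflects that the board can only ever supply 3.3V. A standalone restatement of just that decision, with a made-up helper name:

/* illustrative sketch only - mirrors the Vcc switch above */
#include <stdio.h>

static int shannon_map_vcc(unsigned int vcc_tenths)
{
	switch (vcc_tenths) {
	case 0:		/* power-off request: board keeps applying 3.3V */
	case 50:	/* 5V request: board silently supplies 3.3V instead */
	case 33:	/* the only level the hardware can drive */
		return 33;
	default:
		return -1;	/* unrecognized Vcc, as in the driver */
	}
}

int main(void)
{
	printf("Vcc 50 -> %d\n", shannon_map_vcc(50));	/* prints 33 */
	printf("Vcc 12 -> %d\n", shannon_map_vcc(12));	/* prints -1 */
	return 0;
}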
gpl-2.0
Rockr172/villeu-4.2
drivers/gpu/drm/drm_modes.c
4872
34461
/* * Copyright © 1997-2003 by The XFree86 Project, Inc. * Copyright © 2007 Dave Airlie * Copyright © 2007-2008 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * Copyright 2005-2006 Luc Verhaegen * Copyright (c) 2001, Andy Ritger aritger@nvidia.com * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Except as contained in this notice, the name of the copyright holder(s) * and author(s) shall not be used in advertising or otherwise to promote * the sale, use or other dealings in this Software without prior written * authorization from the copyright holder(s) and author(s). */ #include <linux/list.h> #include <linux/list_sort.h> #include <linux/export.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" /** * drm_mode_debug_printmodeline - debug print a mode * @dev: DRM device * @mode: mode to print * * LOCKING: * None. * * Describe @mode using DRM_DEBUG. */ void drm_mode_debug_printmodeline(struct drm_display_mode *mode) { DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " "0x%x 0x%x\n", mode->base.id, mode->name, mode->vrefresh, mode->clock, mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); } EXPORT_SYMBOL(drm_mode_debug_printmodeline); /** * drm_cvt_mode -create a modeline based on CVT algorithm * @dev: DRM device * @hdisplay: hdisplay size * @vdisplay: vdisplay size * @vrefresh : vrefresh rate * @reduced : Whether the GTF calculation is simplified * @interlaced:Whether the interlace is supported * * LOCKING: * none. * * return the modeline based on CVT algorithm * * This function is called to generate the modeline based on CVT algorithm * according to the hdisplay, vdisplay, vrefresh. * It is based from the VESA(TM) Coordinated Video Timing Generator by * Graham Loveridge April 9, 2003 available at * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls * * And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. * What I have done is to translate it by using integer calculation. 
*/ #define HV_FACTOR 1000 struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool reduced, bool interlaced, bool margins) { /* 1) top/bottom margin size (% of height) - default: 1.8, */ #define CVT_MARGIN_PERCENTAGE 18 /* 2) character cell horizontal granularity (pixels) - default 8 */ #define CVT_H_GRANULARITY 8 /* 3) Minimum vertical porch (lines) - default 3 */ #define CVT_MIN_V_PORCH 3 /* 4) Minimum number of vertical back porch lines - default 6 */ #define CVT_MIN_V_BPORCH 6 /* Pixel Clock step (kHz) */ #define CVT_CLOCK_STEP 250 struct drm_display_mode *drm_mode; unsigned int vfieldrate, hperiod; int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync; int interlace; /* allocate the drm_display_mode structure. If failure, we will * return directly */ drm_mode = drm_mode_create(dev); if (!drm_mode) return NULL; /* the CVT default refresh rate is 60Hz */ if (!vrefresh) vrefresh = 60; /* the required field fresh rate */ if (interlaced) vfieldrate = vrefresh * 2; else vfieldrate = vrefresh; /* horizontal pixels */ hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY); /* determine the left&right borders */ hmargin = 0; if (margins) { hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000; hmargin -= hmargin % CVT_H_GRANULARITY; } /* find the total active pixels */ drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin; /* find the number of lines per field */ if (interlaced) vdisplay_rnd = vdisplay / 2; else vdisplay_rnd = vdisplay; /* find the top & bottom borders */ vmargin = 0; if (margins) vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000; drm_mode->vdisplay = vdisplay + 2 * vmargin; /* Interlaced */ if (interlaced) interlace = 1; else interlace = 0; /* Determine VSync Width from aspect ratio */ if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay)) vsync = 4; else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay)) vsync = 5; else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay)) vsync = 6; else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay)) vsync = 7; else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay)) vsync = 7; else /* custom */ vsync = 10; if (!reduced) { /* simplify the GTF calculation */ /* 4) Minimum time of vertical sync + back porch interval (µs) * default 550.0 */ int tmp1, tmp2; #define CVT_MIN_VSYNC_BP 550 /* 3) Nominal HSync width (% of line period) - default 8 */ #define CVT_HSYNC_PERCENTAGE 8 unsigned int hblank_percentage; int vsyncandback_porch, vback_porch, hblank; /* estimated the horizontal period */ tmp1 = HV_FACTOR * 1000000 - CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate; tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 + interlace; hperiod = tmp1 * 2 / (tmp2 * vfieldrate); tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1; /* 9. Find number of lines in sync + backporch */ if (tmp1 < (vsync + CVT_MIN_V_PORCH)) vsyncandback_porch = vsync + CVT_MIN_V_PORCH; else vsyncandback_porch = tmp1; /* 10. 
Find number of lines in back porch */ vback_porch = vsyncandback_porch - vsync; drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vsyncandback_porch + CVT_MIN_V_PORCH; /* 5) Definition of Horizontal blanking time limitation */ /* Gradient (%/kHz) - default 600 */ #define CVT_M_FACTOR 600 /* Offset (%) - default 40 */ #define CVT_C_FACTOR 40 /* Blanking time scaling factor - default 128 */ #define CVT_K_FACTOR 128 /* Scaling factor weighting - default 20 */ #define CVT_J_FACTOR 20 #define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256) #define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \ CVT_J_FACTOR) /* 12. Find ideal blanking duty cycle from formula */ hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME * hperiod / 1000; /* 13. Blanking time */ if (hblank_percentage < 20 * HV_FACTOR) hblank_percentage = 20 * HV_FACTOR; hblank = drm_mode->hdisplay * hblank_percentage / (100 * HV_FACTOR - hblank_percentage); hblank -= hblank % (2 * CVT_H_GRANULARITY); /* 14. find the total pixes per line */ drm_mode->htotal = drm_mode->hdisplay + hblank; drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2; drm_mode->hsync_start = drm_mode->hsync_end - (drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100; drm_mode->hsync_start += CVT_H_GRANULARITY - drm_mode->hsync_start % CVT_H_GRANULARITY; /* fill the Vsync values */ drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH; drm_mode->vsync_end = drm_mode->vsync_start + vsync; } else { /* Reduced blanking */ /* Minimum vertical blanking interval time (µs)- default 460 */ #define CVT_RB_MIN_VBLANK 460 /* Fixed number of clocks for horizontal sync */ #define CVT_RB_H_SYNC 32 /* Fixed number of clocks for horizontal blanking */ #define CVT_RB_H_BLANK 160 /* Fixed number of lines for vertical front porch - default 3*/ #define CVT_RB_VFPORCH 3 int vbilines; int tmp1, tmp2; /* 8. Estimate Horizontal period. */ tmp1 = HV_FACTOR * 1000000 - CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate; tmp2 = vdisplay_rnd + 2 * vmargin; hperiod = tmp1 / (tmp2 * vfieldrate); /* 9. Find number of lines in vertical blanking */ vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1; /* 10. Check if vertical blanking is sufficient */ if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH)) vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH; /* 11. Find total number of lines in vertical field */ drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines; /* 12. Find total number of pixels in a line */ drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; /* Fill in HSync values */ drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC; /* Fill in VSync values */ drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH; drm_mode->vsync_end = drm_mode->vsync_start + vsync; } /* 15/13. Find pixel clock frequency (kHz for xf86) */ drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; /* 18/16. 
Find actual vertical frame frequency */ /* ignore - just set the mode flag for interlaced */ if (interlaced) { drm_mode->vtotal *= 2; drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; } /* Fill the mode line name */ drm_mode_set_name(drm_mode); if (reduced) drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC); else drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NHSYNC); return drm_mode; } EXPORT_SYMBOL(drm_cvt_mode); /** * drm_gtf_mode_complex - create the modeline based on full GTF algorithm * * @dev :drm device * @hdisplay :hdisplay size * @vdisplay :vdisplay size * @vrefresh :vrefresh rate. * @interlaced :whether the interlace is supported * @margins :desired margin size * @GTF_[MCKJ] :extended GTF formula parameters * * LOCKING. * none. * * return the modeline based on full GTF algorithm. * * GTF feature blocks specify C and J in multiples of 0.5, so we pass them * in here multiplied by two. For a C of 40, pass in 80. */ struct drm_display_mode * drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool interlaced, int margins, int GTF_M, int GTF_2C, int GTF_K, int GTF_2J) { /* 1) top/bottom margin size (% of height) - default: 1.8, */ #define GTF_MARGIN_PERCENTAGE 18 /* 2) character cell horizontal granularity (pixels) - default 8 */ #define GTF_CELL_GRAN 8 /* 3) Minimum vertical porch (lines) - default 3 */ #define GTF_MIN_V_PORCH 1 /* width of vsync in lines */ #define V_SYNC_RQD 3 /* width of hsync as % of total line */ #define H_SYNC_PERCENT 8 /* min time of vsync + back porch (microsec) */ #define MIN_VSYNC_PLUS_BP 550 /* C' and M' are part of the Blanking Duty Cycle computation */ #define GTF_C_PRIME ((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2) #define GTF_M_PRIME (GTF_K * GTF_M / 256) struct drm_display_mode *drm_mode; unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd; int top_margin, bottom_margin; int interlace; unsigned int hfreq_est; int vsync_plus_bp, vback_porch; unsigned int vtotal_lines, vfieldrate_est, hperiod; unsigned int vfield_rate, vframe_rate; int left_margin, right_margin; unsigned int total_active_pixels, ideal_duty_cycle; unsigned int hblank, total_pixels, pixel_freq; int hsync, hfront_porch, vodd_front_porch_lines; unsigned int tmp1, tmp2; drm_mode = drm_mode_create(dev); if (!drm_mode) return NULL; /* 1. In order to give correct results, the number of horizontal * pixels requested is first processed to ensure that it is divisible * by the character size, by rounding it to the nearest character * cell boundary: */ hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN; hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN; /* 2. If interlace is requested, the number of vertical lines assumed * by the calculation must be halved, as the computation calculates * the number of vertical lines per field. */ if (interlaced) vdisplay_rnd = vdisplay / 2; else vdisplay_rnd = vdisplay; /* 3. Find the frame rate required: */ if (interlaced) vfieldrate_rqd = vrefresh * 2; else vfieldrate_rqd = vrefresh; /* 4. Find number of lines in Top margin: */ top_margin = 0; if (margins) top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) / 1000; /* 5. Find number of lines in bottom margin: */ bottom_margin = top_margin; /* 6. If interlace is required, then set variable interlace: */ if (interlaced) interlace = 1; else interlace = 0; /* 7. 
Estimate the Horizontal frequency */ { tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500; tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) * 2 + interlace; hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1; } /* 8. Find the number of lines in V sync + back porch */ /* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */ vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000; vsync_plus_bp = (vsync_plus_bp + 500) / 1000; /* 9. Find the number of lines in V back porch alone: */ vback_porch = vsync_plus_bp - V_SYNC_RQD; /* 10. Find the total number of lines in Vertical field period: */ vtotal_lines = vdisplay_rnd + top_margin + bottom_margin + vsync_plus_bp + GTF_MIN_V_PORCH; /* 11. Estimate the Vertical field frequency: */ vfieldrate_est = hfreq_est / vtotal_lines; /* 12. Find the actual horizontal period: */ hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines); /* 13. Find the actual Vertical field frequency: */ vfield_rate = hfreq_est / vtotal_lines; /* 14. Find the Vertical frame frequency: */ if (interlaced) vframe_rate = vfield_rate / 2; else vframe_rate = vfield_rate; /* 15. Find number of pixels in left margin: */ if (margins) left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) / 1000; else left_margin = 0; /* 16.Find number of pixels in right margin: */ right_margin = left_margin; /* 17.Find total number of active pixels in image and left and right */ total_active_pixels = hdisplay_rnd + left_margin + right_margin; /* 18.Find the ideal blanking duty cycle from blanking duty cycle */ ideal_duty_cycle = GTF_C_PRIME * 1000 - (GTF_M_PRIME * 1000000 / hfreq_est); /* 19.Find the number of pixels in the blanking time to the nearest * double character cell: */ hblank = total_active_pixels * ideal_duty_cycle / (100000 - ideal_duty_cycle); hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN); hblank = hblank * 2 * GTF_CELL_GRAN; /* 20.Find total number of pixels: */ total_pixels = total_active_pixels + hblank; /* 21.Find pixel clock frequency: */ pixel_freq = total_pixels * hfreq_est / 1000; /* Stage 1 computations are now complete; I should really pass * the results to another function and do the Stage 2 computations, * but I only need a few more values so I'll just append the * computations here for now */ /* 17. Find the number of pixels in the horizontal sync period: */ hsync = H_SYNC_PERCENT * total_pixels / 100; hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN; hsync = hsync * GTF_CELL_GRAN; /* 18. Find the number of pixels in horizontal front porch period */ hfront_porch = hblank / 2 - hsync; /* 36. 
Find the number of lines in the odd front porch period: */ vodd_front_porch_lines = GTF_MIN_V_PORCH ; /* finally, pack the results in the mode struct */ drm_mode->hdisplay = hdisplay_rnd; drm_mode->hsync_start = hdisplay_rnd + hfront_porch; drm_mode->hsync_end = drm_mode->hsync_start + hsync; drm_mode->htotal = total_pixels; drm_mode->vdisplay = vdisplay_rnd; drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines; drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD; drm_mode->vtotal = vtotal_lines; drm_mode->clock = pixel_freq; if (interlaced) { drm_mode->vtotal *= 2; drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; } drm_mode_set_name(drm_mode); if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40) drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC; else drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC; return drm_mode; } EXPORT_SYMBOL(drm_gtf_mode_complex); /** * drm_gtf_mode - create the modeline based on GTF algorithm * * @dev :drm device * @hdisplay :hdisplay size * @vdisplay :vdisplay size * @vrefresh :vrefresh rate. * @interlaced :whether the interlace is supported * @margins :whether the margin is supported * * LOCKING. * none. * * return the modeline based on GTF algorithm * * This function is to create the modeline based on the GTF algorithm. * Generalized Timing Formula is derived from: * GTF Spreadsheet by Andy Morrish (1/5/97) * available at http://www.vesa.org * * And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. * What I have done is to translate it by using integer calculation. * I also refer to the function of fb_get_mode in the file of * drivers/video/fbmon.c * * Standard GTF parameters: * M = 600 * C = 40 * K = 128 * J = 20 */ struct drm_display_mode * drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh, bool lace, int margins) { return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace, margins, 600, 40 * 2, 128, 20 * 2); } EXPORT_SYMBOL(drm_gtf_mode); /** * drm_mode_set_name - set the name on a mode * @mode: name will be set in this mode * * LOCKING: * None. * * Set the name of @mode to a standard format. */ void drm_mode_set_name(struct drm_display_mode *mode) { bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s", mode->hdisplay, mode->vdisplay, interlaced ? "i" : ""); } EXPORT_SYMBOL(drm_mode_set_name); /** * drm_mode_list_concat - move modes from one list to another * @head: source list * @new: dst list * * LOCKING: * Caller must ensure both lists are locked. * * Move all the modes from @head to @new. */ void drm_mode_list_concat(struct list_head *head, struct list_head *new) { struct list_head *entry, *tmp; list_for_each_safe(entry, tmp, head) { list_move_tail(entry, new); } } EXPORT_SYMBOL(drm_mode_list_concat); /** * drm_mode_width - get the width of a mode * @mode: mode * * LOCKING: * None. * * Return @mode's width (hdisplay) value. * * FIXME: is this needed? * * RETURNS: * @mode->hdisplay */ int drm_mode_width(struct drm_display_mode *mode) { return mode->hdisplay; } EXPORT_SYMBOL(drm_mode_width); /** * drm_mode_height - get the height of a mode * @mode: mode * * LOCKING: * None. * * Return @mode's height (vdisplay) value. * * FIXME: is this needed? * * RETURNS: * @mode->vdisplay */ int drm_mode_height(struct drm_display_mode *mode) { return mode->vdisplay; } EXPORT_SYMBOL(drm_mode_height); /** drm_mode_hsync - get the hsync of a mode * @mode: mode * * LOCKING: * None. 
* * Return @modes's hsync rate in kHz, rounded to the nearest int. */ int drm_mode_hsync(const struct drm_display_mode *mode) { unsigned int calc_val; if (mode->hsync) return mode->hsync; if (mode->htotal < 0) return 0; calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ calc_val += 500; /* round to 1000Hz */ calc_val /= 1000; /* truncate to kHz */ return calc_val; } EXPORT_SYMBOL(drm_mode_hsync); /** * drm_mode_vrefresh - get the vrefresh of a mode * @mode: mode * * LOCKING: * None. * * Return @mode's vrefresh rate in Hz or calculate it if necessary. * * FIXME: why is this needed? shouldn't vrefresh be set already? * * RETURNS: * Vertical refresh rate. It will be the result of actual value plus 0.5. * If it is 70.288, it will return 70Hz. * If it is 59.6, it will return 60Hz. */ int drm_mode_vrefresh(const struct drm_display_mode *mode) { int refresh = 0; unsigned int calc_val; if (mode->vrefresh > 0) refresh = mode->vrefresh; else if (mode->htotal > 0 && mode->vtotal > 0) { int vtotal; vtotal = mode->vtotal; /* work out vrefresh the value will be x1000 */ calc_val = (mode->clock * 1000); calc_val /= mode->htotal; refresh = (calc_val + vtotal / 2) / vtotal; if (mode->flags & DRM_MODE_FLAG_INTERLACE) refresh *= 2; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) refresh /= 2; if (mode->vscan > 1) refresh /= mode->vscan; } return refresh; } EXPORT_SYMBOL(drm_mode_vrefresh); /** * drm_mode_set_crtcinfo - set CRTC modesetting parameters * @p: mode * @adjust_flags: unused? (FIXME) * * LOCKING: * None. * * Setup the CRTC modesetting parameters for @p, adjusting if necessary. */ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) { if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) return; p->crtc_hdisplay = p->hdisplay; p->crtc_hsync_start = p->hsync_start; p->crtc_hsync_end = p->hsync_end; p->crtc_htotal = p->htotal; p->crtc_hskew = p->hskew; p->crtc_vdisplay = p->vdisplay; p->crtc_vsync_start = p->vsync_start; p->crtc_vsync_end = p->vsync_end; p->crtc_vtotal = p->vtotal; if (p->flags & DRM_MODE_FLAG_INTERLACE) { if (adjust_flags & CRTC_INTERLACE_HALVE_V) { p->crtc_vdisplay /= 2; p->crtc_vsync_start /= 2; p->crtc_vsync_end /= 2; p->crtc_vtotal /= 2; } } if (p->flags & DRM_MODE_FLAG_DBLSCAN) { p->crtc_vdisplay *= 2; p->crtc_vsync_start *= 2; p->crtc_vsync_end *= 2; p->crtc_vtotal *= 2; } if (p->vscan > 1) { p->crtc_vdisplay *= p->vscan; p->crtc_vsync_start *= p->vscan; p->crtc_vsync_end *= p->vscan; p->crtc_vtotal *= p->vscan; } p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); p->crtc_hadjusted = false; p->crtc_vadjusted = false; } EXPORT_SYMBOL(drm_mode_set_crtcinfo); /** * drm_mode_copy - copy the mode * @dst: mode to overwrite * @src: mode to copy * * LOCKING: * None. * * Copy an existing mode into another mode, preserving the object id * of the destination mode. */ void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src) { int id = dst->base.id; *dst = *src; dst->base.id = id; INIT_LIST_HEAD(&dst->head); } EXPORT_SYMBOL(drm_mode_copy); /** * drm_mode_duplicate - allocate and duplicate an existing mode * @m: mode to duplicate * * LOCKING: * None. * * Just allocate a new mode, copy the existing mode into it, and return * a pointer to it. Used to create new instances of established modes. 
*/ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode) { struct drm_display_mode *nmode; nmode = drm_mode_create(dev); if (!nmode) return NULL; drm_mode_copy(nmode, mode); return nmode; } EXPORT_SYMBOL(drm_mode_duplicate); /** * drm_mode_equal - test modes for equality * @mode1: first mode * @mode2: second mode * * LOCKING: * None. * * Check to see if @mode1 and @mode2 are equivalent. * * RETURNS: * True if the modes are equal, false otherwise. */ bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) { /* do clock check convert to PICOS so fb modes get matched * the same */ if (mode1->clock && mode2->clock) { if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock)) return false; } else if (mode1->clock != mode2->clock) return false; if (mode1->hdisplay == mode2->hdisplay && mode1->hsync_start == mode2->hsync_start && mode1->hsync_end == mode2->hsync_end && mode1->htotal == mode2->htotal && mode1->hskew == mode2->hskew && mode1->vdisplay == mode2->vdisplay && mode1->vsync_start == mode2->vsync_start && mode1->vsync_end == mode2->vsync_end && mode1->vtotal == mode2->vtotal && mode1->vscan == mode2->vscan && mode1->flags == mode2->flags) return true; return false; } EXPORT_SYMBOL(drm_mode_equal); /** * drm_mode_validate_size - make sure modes adhere to size constraints * @dev: DRM device * @mode_list: list of modes to check * @maxX: maximum width * @maxY: maximum height * @maxPitch: max pitch * * LOCKING: * Caller must hold a lock protecting @mode_list. * * The DRM device (@dev) has size and pitch limits. Here we validate the * modes we probed for @dev against those limits and set their status as * necessary. */ void drm_mode_validate_size(struct drm_device *dev, struct list_head *mode_list, int maxX, int maxY, int maxPitch) { struct drm_display_mode *mode; list_for_each_entry(mode, mode_list, head) { if (maxPitch > 0 && mode->hdisplay > maxPitch) mode->status = MODE_BAD_WIDTH; if (maxX > 0 && mode->hdisplay > maxX) mode->status = MODE_VIRTUAL_X; if (maxY > 0 && mode->vdisplay > maxY) mode->status = MODE_VIRTUAL_Y; } } EXPORT_SYMBOL(drm_mode_validate_size); /** * drm_mode_validate_clocks - validate modes against clock limits * @dev: DRM device * @mode_list: list of modes to check * @min: minimum clock rate array * @max: maximum clock rate array * @n_ranges: number of clock ranges (size of arrays) * * LOCKING: * Caller must hold a lock protecting @mode_list. * * Some code may need to check a mode list against the clock limits of the * device in question. This function walks the mode list, testing to make * sure each mode falls within a given range (defined by @min and @max * arrays) and sets @mode->status as needed. */ void drm_mode_validate_clocks(struct drm_device *dev, struct list_head *mode_list, int *min, int *max, int n_ranges) { struct drm_display_mode *mode; int i; list_for_each_entry(mode, mode_list, head) { bool good = false; for (i = 0; i < n_ranges; i++) { if (mode->clock >= min[i] && mode->clock <= max[i]) { good = true; break; } } if (!good) mode->status = MODE_CLOCK_RANGE; } } EXPORT_SYMBOL(drm_mode_validate_clocks); /** * drm_mode_prune_invalid - remove invalid modes from mode list * @dev: DRM device * @mode_list: list of modes to check * @verbose: be verbose about it * * LOCKING: * Caller must hold a lock protecting @mode_list. * * Once mode list generation is complete, a caller can use this routine to * remove invalid modes from a mode list. 
If any of the modes have a * status other than %MODE_OK, they are removed from @mode_list and freed. */ void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose) { struct drm_display_mode *mode, *t; list_for_each_entry_safe(mode, t, mode_list, head) { if (mode->status != MODE_OK) { list_del(&mode->head); if (verbose) { drm_mode_debug_printmodeline(mode); DRM_DEBUG_KMS("Not using %s mode %d\n", mode->name, mode->status); } drm_mode_destroy(dev, mode); } } } EXPORT_SYMBOL(drm_mode_prune_invalid); /** * drm_mode_compare - compare modes for favorability * @priv: unused * @lh_a: list_head for first mode * @lh_b: list_head for second mode * * LOCKING: * None. * * Compare two modes, given by @lh_a and @lh_b, returning a value indicating * which is better. * * RETURNS: * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or * positive if @lh_b is better than @lh_a. */ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b) { struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head); struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head); int diff; diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) - ((a->type & DRM_MODE_TYPE_PREFERRED) != 0); if (diff) return diff; diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay; if (diff) return diff; diff = b->clock - a->clock; return diff; } /** * drm_mode_sort - sort mode list * @mode_list: list to sort * * LOCKING: * Caller must hold a lock protecting @mode_list. * * Sort @mode_list by favorability, putting good modes first. */ void drm_mode_sort(struct list_head *mode_list) { list_sort(NULL, mode_list, drm_mode_compare); } EXPORT_SYMBOL(drm_mode_sort); /** * drm_mode_connector_list_update - update the mode list for the connector * @connector: the connector to update * * LOCKING: * Caller must hold a lock protecting @mode_list. * * This moves the modes from the @connector probed_modes list * to the actual mode list. It compares the probed mode against the current * list and only adds different modes. All modes unverified after this point * will be removed by the prune invalid modes. */ void drm_mode_connector_list_update(struct drm_connector *connector) { struct drm_display_mode *mode; struct drm_display_mode *pmode, *pt; int found_it; list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) { found_it = 0; /* go through current modes checking for the new probed mode */ list_for_each_entry(mode, &connector->modes, head) { if (drm_mode_equal(pmode, mode)) { found_it = 1; /* if equal delete the probed mode */ mode->status = pmode->status; /* Merge type bits together */ mode->type |= pmode->type; list_del(&pmode->head); drm_mode_destroy(connector->dev, pmode); break; } } if (!found_it) { list_move_tail(&pmode->head, &connector->modes); } } } EXPORT_SYMBOL(drm_mode_connector_list_update); /** * drm_mode_parse_command_line_for_connector - parse command line for connector * @mode_option - per connector mode option * @connector - connector to parse line for * * This parses the connector specific then generic command lines for * modes and options to configure the connector. 
* * This uses the same parameters as the fb modedb.c, except for extra * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] * * enable/enable Digital/disable bit at the end */ bool drm_mode_parse_command_line_for_connector(const char *mode_option, struct drm_connector *connector, struct drm_cmdline_mode *mode) { const char *name; unsigned int namelen; bool res_specified = false, bpp_specified = false, refresh_specified = false; unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0; bool yres_specified = false, cvt = false, rb = false; bool interlace = false, margins = false, was_digit = false; int i; enum drm_connector_force force = DRM_FORCE_UNSPECIFIED; #ifdef CONFIG_FB if (!mode_option) mode_option = fb_mode_option; #endif if (!mode_option) { mode->specified = false; return false; } name = mode_option; namelen = strlen(name); for (i = namelen-1; i >= 0; i--) { switch (name[i]) { case '@': if (!refresh_specified && !bpp_specified && !yres_specified && !cvt && !rb && was_digit) { refresh = simple_strtol(&name[i+1], NULL, 10); refresh_specified = true; was_digit = false; } else goto done; break; case '-': if (!bpp_specified && !yres_specified && !cvt && !rb && was_digit) { bpp = simple_strtol(&name[i+1], NULL, 10); bpp_specified = true; was_digit = false; } else goto done; break; case 'x': if (!yres_specified && was_digit) { yres = simple_strtol(&name[i+1], NULL, 10); yres_specified = true; was_digit = false; } else goto done; break; case '0' ... '9': was_digit = true; break; case 'M': if (yres_specified || cvt || was_digit) goto done; cvt = true; break; case 'R': if (yres_specified || cvt || rb || was_digit) goto done; rb = true; break; case 'm': if (cvt || yres_specified || was_digit) goto done; margins = true; break; case 'i': if (cvt || yres_specified || was_digit) goto done; interlace = true; break; case 'e': if (yres_specified || bpp_specified || refresh_specified || was_digit || (force != DRM_FORCE_UNSPECIFIED)) goto done; force = DRM_FORCE_ON; break; case 'D': if (yres_specified || bpp_specified || refresh_specified || was_digit || (force != DRM_FORCE_UNSPECIFIED)) goto done; if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) && (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB)) force = DRM_FORCE_ON; else force = DRM_FORCE_ON_DIGITAL; break; case 'd': if (yres_specified || bpp_specified || refresh_specified || was_digit || (force != DRM_FORCE_UNSPECIFIED)) goto done; force = DRM_FORCE_OFF; break; default: goto done; } } if (i < 0 && yres_specified) { char *ch; xres = simple_strtol(name, &ch, 10); if ((ch != NULL) && (*ch == 'x')) res_specified = true; else i = ch - name; } else if (!yres_specified && was_digit) { /* catch mode that begins with digits but has no 'x' */ i = 0; } done: if (i >= 0) { printk(KERN_WARNING "parse error at position %i in video mode '%s'\n", i, name); mode->specified = false; return false; } if (res_specified) { mode->specified = true; mode->xres = xres; mode->yres = yres; } if (refresh_specified) { mode->refresh_specified = true; mode->refresh = refresh; } if (bpp_specified) { mode->bpp_specified = true; mode->bpp = bpp; } mode->rb = rb; mode->cvt = cvt; mode->interlace = interlace; mode->margins = margins; mode->force = force; return true; } EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector); struct drm_display_mode * drm_mode_create_from_cmdline_mode(struct drm_device *dev, struct drm_cmdline_mode *cmd) { struct drm_display_mode *mode; if (cmd->cvt) mode = drm_cvt_mode(dev, cmd->xres, cmd->yres, cmd->refresh_specified ? 
cmd->refresh : 60, cmd->rb, cmd->interlace, cmd->margins); else mode = drm_gtf_mode(dev, cmd->xres, cmd->yres, cmd->refresh_specified ? cmd->refresh : 60, cmd->interlace, cmd->margins); if (!mode) return NULL; drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); return mode; } EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
gpl-2.0
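[Editorial note] The drm_mode_compare() ordering in the record above (preferred flag first, then resolution area, then pixel clock) maps directly onto a standard comparator. Below is a minimal userspace sketch of that ordering; struct fake_mode and its fields are illustrative stand-ins for struct drm_display_mode, and qsort() stands in for the kernel's list_sort():

#include <stdio.h>
#include <stdlib.h>

struct fake_mode {
        const char *name;
        int preferred;          /* stands in for DRM_MODE_TYPE_PREFERRED */
        int hdisplay, vdisplay;
        int clock;              /* pixel clock in kHz */
};

static int fake_mode_cmp(const void *pa, const void *pb)
{
        const struct fake_mode *a = pa;
        const struct fake_mode *b = pb;
        int diff;

        diff = b->preferred - a->preferred;     /* preferred modes first */
        if (diff)
                return diff;
        diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
        if (diff)                               /* then larger area */
                return diff;
        return b->clock - a->clock;             /* then higher clock */
}

int main(void)
{
        struct fake_mode modes[] = {
                { "1024x768",  0, 1024,  768,  65000 },
                { "1920x1080", 1, 1920, 1080, 148500 },
                { "1280x1024", 0, 1280, 1024, 108000 },
        };
        size_t i, n = sizeof(modes) / sizeof(modes[0]);

        qsort(modes, n, sizeof(modes[0]), fake_mode_cmp);
        for (i = 0; i < n; i++)
                printf("%s\n", modes[i].name);
        return 0;
}

Compiled and run, this prints 1920x1080, 1280x1024, 1024x768 -- the same favorability order drm_mode_sort() produces for a connector's mode list.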
drgogeta86/android_kernel_asus_flo
drivers/usb/otg/ulpi.c
5896
6465
/* * Generic ULPI USB transceiver support * * Copyright (C) 2009 Daniel Mack <daniel@caiaq.de> * * Based on sources from * * Sascha Hauer <s.hauer@pengutronix.de> * Freescale Semiconductors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/usb.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> struct ulpi_info { unsigned int id; char *name; }; #define ULPI_ID(vendor, product) (((vendor) << 16) | (product)) #define ULPI_INFO(_id, _name) \ { \ .id = (_id), \ .name = (_name), \ } /* ULPI hardcoded IDs, used for probing */ static struct ulpi_info ulpi_ids[] = { ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"), ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"), }; static int ulpi_set_otg_flags(struct usb_phy *phy) { unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN; if (phy->flags & ULPI_OTG_ID_PULLUP) flags |= ULPI_OTG_CTRL_ID_PULLUP; /* * ULPI Specification rev.1.1 default * for Dp/DmPulldown is enabled. */ if (phy->flags & ULPI_OTG_DP_PULLDOWN_DIS) flags &= ~ULPI_OTG_CTRL_DP_PULLDOWN; if (phy->flags & ULPI_OTG_DM_PULLDOWN_DIS) flags &= ~ULPI_OTG_CTRL_DM_PULLDOWN; if (phy->flags & ULPI_OTG_EXTVBUSIND) flags |= ULPI_OTG_CTRL_EXTVBUSIND; return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL); } static int ulpi_set_fc_flags(struct usb_phy *phy) { unsigned int flags = 0; /* * ULPI Specification rev.1.1 default * for XcvrSelect is Full Speed. */ if (phy->flags & ULPI_FC_HS) flags |= ULPI_FUNC_CTRL_HIGH_SPEED; else if (phy->flags & ULPI_FC_LS) flags |= ULPI_FUNC_CTRL_LOW_SPEED; else if (phy->flags & ULPI_FC_FS4LS) flags |= ULPI_FUNC_CTRL_FS4LS; else flags |= ULPI_FUNC_CTRL_FULL_SPEED; if (phy->flags & ULPI_FC_TERMSEL) flags |= ULPI_FUNC_CTRL_TERMSELECT; /* * ULPI Specification rev.1.1 default * for OpMode is Normal Operation. */ if (phy->flags & ULPI_FC_OP_NODRV) flags |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; else if (phy->flags & ULPI_FC_OP_DIS_NRZI) flags |= ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI; else if (phy->flags & ULPI_FC_OP_NSYNC_NEOP) flags |= ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP; else flags |= ULPI_FUNC_CTRL_OPMODE_NORMAL; /* * ULPI Specification rev.1.1 default * for SuspendM is Powered. 
*/ flags |= ULPI_FUNC_CTRL_SUSPENDM; return usb_phy_io_write(phy, flags, ULPI_FUNC_CTRL); } static int ulpi_set_ic_flags(struct usb_phy *phy) { unsigned int flags = 0; if (phy->flags & ULPI_IC_AUTORESUME) flags |= ULPI_IFC_CTRL_AUTORESUME; if (phy->flags & ULPI_IC_EXTVBUS_INDINV) flags |= ULPI_IFC_CTRL_EXTERNAL_VBUS; if (phy->flags & ULPI_IC_IND_PASSTHRU) flags |= ULPI_IFC_CTRL_PASSTHRU; if (phy->flags & ULPI_IC_PROTECT_DIS) flags |= ULPI_IFC_CTRL_PROTECT_IFC_DISABLE; return usb_phy_io_write(phy, flags, ULPI_IFC_CTRL); } static int ulpi_set_flags(struct usb_phy *phy) { int ret; ret = ulpi_set_otg_flags(phy); if (ret) return ret; ret = ulpi_set_ic_flags(phy); if (ret) return ret; return ulpi_set_fc_flags(phy); } static int ulpi_check_integrity(struct usb_phy *phy) { int ret, i; unsigned int val = 0x55; for (i = 0; i < 2; i++) { ret = usb_phy_io_write(phy, val, ULPI_SCRATCH); if (ret < 0) return ret; ret = usb_phy_io_read(phy, ULPI_SCRATCH); if (ret < 0) return ret; if (ret != val) { pr_err("ULPI integrity check: failed!"); return -ENODEV; } val = val << 1; } pr_info("ULPI integrity check: passed.\n"); return 0; } static int ulpi_init(struct usb_phy *phy) { int i, vid, pid, ret; u32 ulpi_id = 0; for (i = 0; i < 4; i++) { ret = usb_phy_io_read(phy, ULPI_PRODUCT_ID_HIGH - i); if (ret < 0) return ret; ulpi_id = (ulpi_id << 8) | ret; } vid = ulpi_id & 0xffff; pid = ulpi_id >> 16; pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid); for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++) { if (ulpi_ids[i].id == ULPI_ID(vid, pid)) { pr_info("Found %s ULPI transceiver.\n", ulpi_ids[i].name); break; } } ret = ulpi_check_integrity(phy); if (ret) return ret; return ulpi_set_flags(phy); } static int ulpi_set_host(struct usb_otg *otg, struct usb_bus *host) { struct usb_phy *phy = otg->phy; unsigned int flags = usb_phy_io_read(phy, ULPI_IFC_CTRL); if (!host) { otg->host = NULL; return 0; } otg->host = host; flags &= ~(ULPI_IFC_CTRL_6_PIN_SERIAL_MODE | ULPI_IFC_CTRL_3_PIN_SERIAL_MODE | ULPI_IFC_CTRL_CARKITMODE); if (phy->flags & ULPI_IC_6PIN_SERIAL) flags |= ULPI_IFC_CTRL_6_PIN_SERIAL_MODE; else if (phy->flags & ULPI_IC_3PIN_SERIAL) flags |= ULPI_IFC_CTRL_3_PIN_SERIAL_MODE; else if (phy->flags & ULPI_IC_CARKIT) flags |= ULPI_IFC_CTRL_CARKITMODE; return usb_phy_io_write(phy, flags, ULPI_IFC_CTRL); } static int ulpi_set_vbus(struct usb_otg *otg, bool on) { struct usb_phy *phy = otg->phy; unsigned int flags = usb_phy_io_read(phy, ULPI_OTG_CTRL); flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT); if (on) { if (phy->flags & ULPI_OTG_DRVVBUS) flags |= ULPI_OTG_CTRL_DRVVBUS; if (phy->flags & ULPI_OTG_DRVVBUS_EXT) flags |= ULPI_OTG_CTRL_DRVVBUS_EXT; } return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL); } struct usb_phy * otg_ulpi_create(struct usb_phy_io_ops *ops, unsigned int flags) { struct usb_phy *phy; struct usb_otg *otg; phy = kzalloc(sizeof(*phy), GFP_KERNEL); if (!phy) return NULL; otg = kzalloc(sizeof(*otg), GFP_KERNEL); if (!otg) { kfree(phy); return NULL; } phy->label = "ULPI"; phy->flags = flags; phy->io_ops = ops; phy->otg = otg; phy->init = ulpi_init; otg->phy = phy; otg->set_host = ulpi_set_host; otg->set_vbus = ulpi_set_vbus; return phy; } EXPORT_SYMBOL_GPL(otg_ulpi_create);
gpl-2.0
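[Editorial note] The scratch-register probe in ulpi_check_integrity() above is a classic bring-up trick: write a pattern whose neighbouring bits differ, read it back, then shift the pattern and repeat so every data line is exercised in both states. A hedged userspace sketch follows; reg_write()/reg_read() are hypothetical stand-ins for usb_phy_io_write()/usb_phy_io_read() against ULPI_SCRATCH:

#include <stdio.h>

static unsigned char scratch;   /* pretend ULPI_SCRATCH register */

static void reg_write(unsigned char val) { scratch = val; }
static unsigned char reg_read(void) { return scratch; }

static int check_integrity(void)
{
        unsigned int val = 0x55;
        int i;

        for (i = 0; i < 2; i++) {
                reg_write(val);
                if (reg_read() != val)
                        return -1;      /* data line stuck or shorted */
                val <<= 1;              /* 0x55 -> 0xaa */
        }
        return 0;
}

int main(void)
{
        printf("ULPI-style integrity check: %s\n",
               check_integrity() ? "failed" : "passed");
        return 0;
}

Two iterations suffice because 0x55 (01010101) and 0xaa (10101010) together drive every one of the eight bits both low and high.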
weizhenwei/mi1_kernel
net/netfilter/xt_connlimit.c
7688
8223
/* * netfilter module to limit the number of parallel tcp * connections per IP address. * (c) 2000 Gerd Knorr <kraxel@bytesex.org> * Nov 2002: Martin Bene <martin.bene@icomedias.com>: * only ignore TIME_WAIT or gone connections * (C) CC Computer Consultants GmbH, 2007 * * based on ... * * Kernel module to match connection tracking information. * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au). */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/jhash.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/module.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/netfilter/nf_conntrack_tcp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_connlimit.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_zones.h> /* we will save the tuples of all connections we care about */ struct xt_connlimit_conn { struct hlist_node node; struct nf_conntrack_tuple tuple; union nf_inet_addr addr; }; struct xt_connlimit_data { struct hlist_head iphash[256]; spinlock_t lock; }; static u_int32_t connlimit_rnd __read_mostly; static inline unsigned int connlimit_iphash(__be32 addr) { return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF; } static inline unsigned int connlimit_iphash6(const union nf_inet_addr *addr, const union nf_inet_addr *mask) { union nf_inet_addr res; unsigned int i; for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) res.ip6[i] = addr->ip6[i] & mask->ip6[i]; return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF; } static inline bool already_closed(const struct nf_conn *conn) { if (nf_ct_protonum(conn) == IPPROTO_TCP) return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT || conn->proto.tcp.state == TCP_CONNTRACK_CLOSE; else return 0; } static inline unsigned int same_source_net(const union nf_inet_addr *addr, const union nf_inet_addr *mask, const union nf_inet_addr *u3, u_int8_t family) { if (family == NFPROTO_IPV4) { return (addr->ip & mask->ip) == (u3->ip & mask->ip); } else { union nf_inet_addr lh, rh; unsigned int i; for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i) { lh.ip6[i] = addr->ip6[i] & mask->ip6[i]; rh.ip6[i] = u3->ip6[i] & mask->ip6[i]; } return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6)) == 0; } } static int count_them(struct net *net, struct xt_connlimit_data *data, const struct nf_conntrack_tuple *tuple, const union nf_inet_addr *addr, const union nf_inet_addr *mask, u_int8_t family) { const struct nf_conntrack_tuple_hash *found; struct xt_connlimit_conn *conn; struct hlist_node *pos, *n; struct nf_conn *found_ct; struct hlist_head *hash; bool addit = true; int matches = 0; if (family == NFPROTO_IPV6) hash = &data->iphash[connlimit_iphash6(addr, mask)]; else hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)]; rcu_read_lock(); /* check the saved connections */ hlist_for_each_entry_safe(conn, pos, n, hash, node) { found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, &conn->tuple); found_ct = NULL; if (found != NULL) found_ct = nf_ct_tuplehash_to_ctrack(found); if (found_ct != NULL && nf_ct_tuple_equal(&conn->tuple, tuple) && !already_closed(found_ct)) /* * Just to be sure we have it only once in the list. * We should not see tuples twice unless someone hooks * this into a table without "-p tcp --syn". 
*/ addit = false; if (found == NULL) { /* this one is gone */ hlist_del(&conn->node); kfree(conn); continue; } if (already_closed(found_ct)) { /* * we do not care about connections which are * closed already -> ditch it */ nf_ct_put(found_ct); hlist_del(&conn->node); kfree(conn); continue; } if (same_source_net(addr, mask, &conn->addr, family)) /* same source network -> be counted! */ ++matches; nf_ct_put(found_ct); } rcu_read_unlock(); if (addit) { /* save the new connection in our list */ conn = kmalloc(sizeof(*conn), GFP_ATOMIC); if (conn == NULL) return -ENOMEM; conn->tuple = *tuple; conn->addr = *addr; hlist_add_head(&conn->node, hash); ++matches; } return matches; } static bool connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct net *net = dev_net(par->in ? par->in : par->out); const struct xt_connlimit_info *info = par->matchinfo; union nf_inet_addr addr; struct nf_conntrack_tuple tuple; const struct nf_conntrack_tuple *tuple_ptr = &tuple; enum ip_conntrack_info ctinfo; const struct nf_conn *ct; int connections; ct = nf_ct_get(skb, &ctinfo); if (ct != NULL) tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), par->family, &tuple)) goto hotdrop; if (par->family == NFPROTO_IPV6) { const struct ipv6hdr *iph = ipv6_hdr(skb); memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ? &iph->daddr : &iph->saddr, sizeof(addr.ip6)); } else { const struct iphdr *iph = ip_hdr(skb); addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ? iph->daddr : iph->saddr; } spin_lock_bh(&info->data->lock); connections = count_them(net, info->data, tuple_ptr, &addr, &info->mask, par->family); spin_unlock_bh(&info->data->lock); if (connections < 0) /* kmalloc failed, drop it entirely */ goto hotdrop; return (connections > info->limit) ^ !!(info->flags & XT_CONNLIMIT_INVERT); hotdrop: par->hotdrop = true; return false; } static int connlimit_mt_check(const struct xt_mtchk_param *par) { struct xt_connlimit_info *info = par->matchinfo; unsigned int i; int ret; if (unlikely(!connlimit_rnd)) { u_int32_t rand; do { get_random_bytes(&rand, sizeof(rand)); } while (!rand); cmpxchg(&connlimit_rnd, 0, rand); } ret = nf_ct_l3proto_try_module_get(par->family); if (ret < 0) { pr_info("cannot load conntrack support for " "address family %u\n", par->family); return ret; } /* init private data */ info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL); if (info->data == NULL) { nf_ct_l3proto_module_put(par->family); return -ENOMEM; } spin_lock_init(&info->data->lock); for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) INIT_HLIST_HEAD(&info->data->iphash[i]); return 0; } static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_connlimit_info *info = par->matchinfo; struct xt_connlimit_conn *conn; struct hlist_node *pos, *n; struct hlist_head *hash = info->data->iphash; unsigned int i; nf_ct_l3proto_module_put(par->family); for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) { hlist_for_each_entry_safe(conn, pos, n, &hash[i], node) { hlist_del(&conn->node); kfree(conn); } } kfree(info->data); } static struct xt_match connlimit_mt_reg[] __read_mostly = { { .name = "connlimit", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = connlimit_mt_check, .match = connlimit_mt, .matchsize = sizeof(struct xt_connlimit_info), .destroy = connlimit_mt_destroy, .me = THIS_MODULE, }, { .name = "connlimit", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = connlimit_mt_check, .match = connlimit_mt, .matchsize = 
sizeof(struct xt_connlimit_info), .destroy = connlimit_mt_destroy, .me = THIS_MODULE, }, }; static int __init connlimit_mt_init(void) { return xt_register_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg)); } static void __exit connlimit_mt_exit(void) { xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg)); } module_init(connlimit_mt_init); module_exit(connlimit_mt_exit); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: Number of connections matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_connlimit"); MODULE_ALIAS("ip6t_connlimit");
gpl-2.0
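[Editorial note] The IPv4 branch of same_source_net() in the record above treats two addresses as belonging to the same source network when they are equal after applying the configured mask. A minimal userspace sketch of that test; addresses are handled as host-order uint32_t for simplicity, whereas the kernel code works on __be32 and also has an IPv6 branch that masks four 32-bit words:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int same_source_net4(uint32_t a, uint32_t b, uint32_t mask)
{
        return (a & mask) == (b & mask);
}

int main(void)
{
        uint32_t a    = ntohl(inet_addr("192.168.1.10"));
        uint32_t b    = ntohl(inet_addr("192.168.1.200"));
        uint32_t mask = ntohl(inet_addr("255.255.255.0"));      /* /24 */

        printf("same /24 source network: %s\n",
               same_source_net4(a, b, mask) ? "yes" : "no");
        return 0;
}

The same masking also feeds the hash-bucket choice: connlimit_iphash() hashes addr & mask, so all connections from one source network land in one bucket and can be counted together.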
lolhi/at1-Kernel
drivers/staging/sbe-2t3e3/cpld.c
8200
8806
/* * SBE 2T3E3 synchronous serial card driver for Linux * * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This code is based on a driver written by SBE Inc. */ #include <linux/delay.h> #include "2t3e3.h" #include "ctrl.h" #define bootrom_set_bit(sc, reg, bit) \ bootrom_write((sc), (reg), \ bootrom_read((sc), (reg)) | (bit)) #define bootrom_clear_bit(sc, reg, bit) \ bootrom_write((sc), (reg), \ bootrom_read((sc), (reg)) & ~(bit)) static inline void cpld_set_bit(struct channel *channel, unsigned reg, u32 bit) { unsigned long flags; spin_lock_irqsave(&channel->card->bootrom_lock, flags); bootrom_set_bit(channel, CPLD_MAP_REG(reg, channel), bit); spin_unlock_irqrestore(&channel->card->bootrom_lock, flags); } static inline void cpld_clear_bit(struct channel *channel, unsigned reg, u32 bit) { unsigned long flags; spin_lock_irqsave(&channel->card->bootrom_lock, flags); bootrom_clear_bit(channel, CPLD_MAP_REG(reg, channel), bit); spin_unlock_irqrestore(&channel->card->bootrom_lock, flags); } void cpld_init(struct channel *sc) { u32 val; #if 0 /* reset LIU and Framer */ val = cpld_val_map[SBE_2T3E3_CPLD_VAL_LIU_FRAMER_RESET][sc->h.slot]; cpld_write(sc, SBE_2T3E3_CPLD_REG_STATIC_RESET, val); udelay(10000); /* TODO - how long? */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_STATIC_RESET, val); #endif /* PCRA */ val = SBE_2T3E3_CPLD_VAL_CRC32 | cpld_val_map[SBE_2T3E3_CPLD_VAL_LOOP_TIMING_SOURCE][sc->h.slot]; cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRA, val); /* PCRB */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRB, val); /* PCRC */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, val); /* PBWF */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWF, val); /* PBWL */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWL, val); /* PLTR */ val = SBE_2T3E3_CPLD_VAL_LCV_COUNTER; cpld_write(sc, SBE_2T3E3_CPLD_REG_PLTR, val); udelay(1000); /* PLCR */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PLCR, val); udelay(1000); /* PPFR */ val = 0x55; cpld_write(sc, SBE_2T3E3_CPLD_REG_PPFR, val); /* TODO: this doesn't work!!! */ /* SERIAL_CHIP_SELECT */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_SERIAL_CHIP_SELECT, val); /* PICSR */ val = SBE_2T3E3_CPLD_VAL_DMO_SIGNAL_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_LOCK_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED; cpld_write(sc, SBE_2T3E3_CPLD_REG_PICSR, val); cpld_start_intr(sc); udelay(1000); } void cpld_start_intr(struct channel *sc) { u32 val; /* PIER */ val = SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_ETHERNET_ENABLE | SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_FRAMER_ENABLE; cpld_write(sc, SBE_2T3E3_CPLD_REG_PIER, val); #if 0 /* do you want to hang up your computer? ENABLE REST OF INTERRUPTS !!! you have been warned :). 
*/ #endif } void cpld_stop_intr(struct channel *sc) { u32 val; /* PIER */ val = 0; cpld_write(sc, SBE_2T3E3_CPLD_REG_PIER, val); } void cpld_set_frame_mode(struct channel *sc, u32 mode) { if (sc->p.frame_mode == mode) return; switch (mode) { case SBE_2T3E3_FRAME_MODE_HDLC: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_TRANSPARENT_MODE | SBE_2T3E3_CPLD_VAL_RAW_MODE); exar7250_unipolar_onoff(sc, SBE_2T3E3_OFF); exar7300_unipolar_onoff(sc, SBE_2T3E3_OFF); break; case SBE_2T3E3_FRAME_MODE_TRANSPARENT: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_RAW_MODE); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_TRANSPARENT_MODE); exar7250_unipolar_onoff(sc, SBE_2T3E3_OFF); exar7300_unipolar_onoff(sc, SBE_2T3E3_OFF); break; case SBE_2T3E3_FRAME_MODE_RAW: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_RAW_MODE); exar7250_unipolar_onoff(sc, SBE_2T3E3_ON); exar7300_unipolar_onoff(sc, SBE_2T3E3_ON); break; default: return; } sc->p.frame_mode = mode; } /* set rate of the local clock */ void cpld_set_frame_type(struct channel *sc, u32 type) { switch (type) { case SBE_2T3E3_FRAME_TYPE_E3_G751: case SBE_2T3E3_FRAME_TYPE_E3_G832: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_LOCAL_CLOCK_E3); break; case SBE_2T3E3_FRAME_TYPE_T3_CBIT: case SBE_2T3E3_FRAME_TYPE_T3_M13: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_LOCAL_CLOCK_E3); break; default: return; } } void cpld_set_scrambler(struct channel *sc, u32 mode) { if (sc->p.scrambler == mode) return; switch (mode) { case SBE_2T3E3_SCRAMBLER_OFF: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE); break; case SBE_2T3E3_SCRAMBLER_LARSCOM: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_TYPE); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE); break; case SBE_2T3E3_SCRAMBLER_ADC_KENTROX_DIGITAL: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_TYPE); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_SCRAMBLER_ENABLE); break; default: return; } sc->p.scrambler = mode; } void cpld_set_crc(struct channel *sc, u32 crc) { if (sc->p.crc == crc) return; switch (crc) { case SBE_2T3E3_CRC_16: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_CRC32); break; case SBE_2T3E3_CRC_32: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_CRC32); break; default: return; } sc->p.crc = crc; } void cpld_select_panel(struct channel *sc, u32 panel) { if (sc->p.panel == panel) return; switch (panel) { case SBE_2T3E3_PANEL_FRONT: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_REAR_PANEL); break; case SBE_2T3E3_PANEL_REAR: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_REAR_PANEL); break; default: return; } udelay(100); sc->p.panel = panel; } extern void cpld_set_clock(struct channel *sc, u32 mode) { if (sc->p.clock_source == mode) return; switch (mode) { case SBE_2T3E3_TIMING_LOCAL: cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_ALT); break; case SBE_2T3E3_TIMING_LOOP: cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRA, SBE_2T3E3_CPLD_VAL_ALT); break; default: return; } sc->p.clock_source = mode; } void cpld_set_pad_count(struct channel *sc, u32 count) { u32 val; if (sc->p.pad_count == count) return; switch (count) { case SBE_2T3E3_PAD_COUNT_1: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_1; break; case SBE_2T3E3_PAD_COUNT_2: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_2; break; case SBE_2T3E3_PAD_COUNT_3: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_3; 
break; case SBE_2T3E3_PAD_COUNT_4: val = SBE_2T3E3_CPLD_VAL_PAD_COUNT_4; break; default: return; } cpld_clear_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, SBE_2T3E3_CPLD_VAL_PAD_COUNT); cpld_set_bit(sc, SBE_2T3E3_CPLD_REG_PCRB, val); sc->p.pad_count = count; } void cpld_LOS_update(struct channel *sc) { u_int8_t los; cpld_write(sc, SBE_2T3E3_CPLD_REG_PICSR, SBE_2T3E3_CPLD_VAL_DMO_SIGNAL_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_LOCK_DETECTED | SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED); los = cpld_read(sc, SBE_2T3E3_CPLD_REG_PICSR) & SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_DETECTED; if (los != sc->s.LOS) dev_info(&sc->pdev->dev, "SBE 2T3E3: LOS status: %s\n", los ? "Loss of signal" : "Signal OK"); sc->s.LOS = los; } void cpld_set_fractional_mode(struct channel *sc, u32 mode, u32 start, u32 stop) { if (mode == SBE_2T3E3_FRACTIONAL_MODE_NONE) { start = 0; stop = 0; } if (sc->p.fractional_mode == mode && sc->p.bandwidth_start == start && sc->p.bandwidth_stop == stop) return; switch (mode) { case SBE_2T3E3_FRACTIONAL_MODE_NONE: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_NONE); break; case SBE_2T3E3_FRACTIONAL_MODE_0: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_0); break; case SBE_2T3E3_FRACTIONAL_MODE_1: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_1); break; case SBE_2T3E3_FRACTIONAL_MODE_2: cpld_write(sc, SBE_2T3E3_CPLD_REG_PCRC, SBE_2T3E3_CPLD_VAL_FRACTIONAL_MODE_2); break; default: printk(KERN_ERR "wrong mode in set_fractional_mode\n"); return; } cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWF, start); cpld_write(sc, SBE_2T3E3_CPLD_REG_PBWL, stop); sc->p.fractional_mode = mode; sc->p.bandwidth_start = start; sc->p.bandwidth_stop = stop; }
gpl-2.0
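[Editorial note] cpld_set_bit()/cpld_clear_bit() above wrap a read-modify-write of a hardware register in the card's bootrom_lock, so concurrent callers cannot lose each other's updates. A hedged userspace sketch of that pattern; the "register" and pthread mutex are illustrative stand-ins for the bootrom accessors and the spinlock (build with -lpthread):

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

static uint32_t fake_reg;
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void reg_set_bit(uint32_t bit)
{
        pthread_mutex_lock(&reg_lock);
        fake_reg |= bit;                /* read-modify-write under the lock */
        pthread_mutex_unlock(&reg_lock);
}

static void reg_clear_bit(uint32_t bit)
{
        pthread_mutex_lock(&reg_lock);
        fake_reg &= ~bit;
        pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
        reg_set_bit(1 << 6);            /* e.g. a RAW_MODE-style flag */
        reg_clear_bit(1 << 6);
        printf("reg = 0x%x\n", fake_reg);
        return 0;
}

Without the lock, two threads doing |= and &= ~ on the same register could each read the stale value and write back a result that drops the other's bit.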
bourne015/kernel-3.0-s5pv210
drivers/gpu/drm/via/via_irq.c
8456
11234
/* via_irq.c * * Copyright 2004 BEAM Ltd. * Copyright 2002 Tungsten Graphics, Inc. * Copyright 2005 Thomas Hellstrom. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Terry Barnaby <terry1@beam.ltd.uk> * Keith Whitwell <keith@tungstengraphics.com> * Thomas Hellstrom <unichrome@shipmail.org> * * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank * interrupt, as well as an infrastructure to handle other interrupts of the chip. * The refresh rate is also calculated for video playback sync purposes. */ #include "drmP.h" #include "drm.h" #include "via_drm.h" #include "via_drv.h" #define VIA_REG_INTERRUPT 0x200 /* VIA_REG_INTERRUPT */ #define VIA_IRQ_GLOBAL (1 << 31) #define VIA_IRQ_VBLANK_ENABLE (1 << 19) #define VIA_IRQ_VBLANK_PENDING (1 << 3) #define VIA_IRQ_HQV0_ENABLE (1 << 11) #define VIA_IRQ_HQV1_ENABLE (1 << 25) #define VIA_IRQ_HQV0_PENDING (1 << 9) #define VIA_IRQ_HQV1_PENDING (1 << 10) #define VIA_IRQ_DMA0_DD_ENABLE (1 << 20) #define VIA_IRQ_DMA0_TD_ENABLE (1 << 21) #define VIA_IRQ_DMA1_DD_ENABLE (1 << 22) #define VIA_IRQ_DMA1_TD_ENABLE (1 << 23) #define VIA_IRQ_DMA0_DD_PENDING (1 << 4) #define VIA_IRQ_DMA0_TD_PENDING (1 << 5) #define VIA_IRQ_DMA1_DD_PENDING (1 << 6) #define VIA_IRQ_DMA1_TD_PENDING (1 << 7) /* * Device-specific IRQs go here. This type might need to be extended with * the register if there are multiple IRQ control registers. * Currently we activate the HQV interrupts of Unichrome Pro group A. 
*/ static maskarray_t via_pro_group_a_irqs[] = { {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 0x00000000 }, {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 0x00000000 }, {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, }; static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs); static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; static maskarray_t via_unichrome_irqs[] = { {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} }; static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs); static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; static unsigned time_diff(struct timeval *now, struct timeval *then) { return (now->tv_usec >= then->tv_usec) ? now->tv_usec - then->tv_usec : 1000000 - (then->tv_usec - now->tv_usec); } u32 via_get_vblank_counter(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; if (crtc != 0) return 0; return atomic_read(&dev_priv->vbl_received); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; int handled = 0; struct timeval cur_vblank; drm_via_irq_t *cur_irq = dev_priv->via_irqs; int i; status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { atomic_inc(&dev_priv->vbl_received); if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { do_gettimeofday(&cur_vblank); if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = time_diff(&cur_vblank, &dev_priv->last_vblank) >> 4; } dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; } if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } drm_handle_vblank(dev, 0); handled = 1; } for (i = 0; i < dev_priv->num_irqs; ++i) { if (status & cur_irq->pending_mask) { atomic_inc(&cur_irq->irq_received); DRM_WAKEUP(&cur_irq->irq_queue); handled = 1; if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) via_dmablit_handler(dev, 0, 1); else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) via_dmablit_handler(dev, 1, 1); } cur_irq++; } /* Acknowledge interrupts */ VIA_WRITE(VIA_REG_INTERRUPT, status); if (handled) return IRQ_HANDLED; else return IRQ_NONE; } static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv) { u32 status; if (dev_priv) { /* Acknowledge interrupts */ status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status | dev_priv->irq_pending_mask); } } int via_enable_vblank(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; u32 status; if (crtc != 0) { DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); return -EINVAL; } status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); return 0; } void via_disable_vblank(struct drm_device *dev, int crtc) { drm_via_private_t *dev_priv = dev->dev_private; u32 status; status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE); VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & 
~0x30); if (crtc != 0) DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); } static int via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned int cur_irq_sequence; drm_via_irq_t *cur_irq; int ret = 0; maskarray_t *masks; int real_irq; DRM_DEBUG("\n"); if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } if (irq >= drm_via_irq_num) { DRM_ERROR("Trying to wait on unknown irq %d\n", irq); return -EINVAL; } real_irq = dev_priv->irq_map[irq]; if (real_irq < 0) { DRM_ERROR("Video IRQ %d not available on this hardware.\n", irq); return -EINVAL; } masks = dev_priv->irq_masks; cur_irq = dev_priv->via_irqs + real_irq; if (masks[real_irq][2] && !force_sequence) { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, ((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4])); cur_irq_sequence = atomic_read(&cur_irq->irq_received); } else { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, (((cur_irq_sequence = atomic_read(&cur_irq->irq_received)) - *sequence) <= (1 << 23))); } *sequence = cur_irq_sequence; return ret; } /* * drm_dma.h hooks */ void via_driver_irq_preinstall(struct drm_device *dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; drm_via_irq_t *cur_irq; int i; DRM_DEBUG("dev_priv: %p\n", dev_priv); if (dev_priv) { cur_irq = dev_priv->via_irqs; dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; if (dev_priv->chipset == VIA_PRO_GROUP_A || dev_priv->chipset == VIA_DX9_0) { dev_priv->irq_masks = via_pro_group_a_irqs; dev_priv->num_irqs = via_num_pro_group_a; dev_priv->irq_map = via_irqmap_pro_group_a; } else { dev_priv->irq_masks = via_unichrome_irqs; dev_priv->num_irqs = via_num_unichrome; dev_priv->irq_map = via_irqmap_unichrome; } for (i = 0; i < dev_priv->num_irqs; ++i) { atomic_set(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->pending_mask = dev_priv->irq_masks[i][1]; DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); dev_priv->irq_enable_mask |= cur_irq->enable_mask; dev_priv->irq_pending_mask |= cur_irq->pending_mask; cur_irq++; DRM_DEBUG("Initializing IRQ %d\n", i); } dev_priv->last_vblank_valid = 0; /* Clear VSync interrupt regs */ status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status & ~(dev_priv->irq_enable_mask)); /* Clear bits if they're already high */ viadrv_acknowledge_irqs(dev_priv); } } int via_driver_irq_postinstall(struct drm_device *dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; DRM_DEBUG("via_driver_irq_postinstall\n"); if (!dev_priv) return -EINVAL; status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL | dev_priv->irq_enable_mask); /* Some magic, oh for some data sheets ! */ VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); return 0; } void via_driver_irq_uninstall(struct drm_device *dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; DRM_DEBUG("\n"); if (dev_priv) { /* Some more magic, oh for some data sheets ! 
*/ VIA_WRITE8(0x83d4, 0x11); VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); status = VIA_READ(VIA_REG_INTERRUPT); VIA_WRITE(VIA_REG_INTERRUPT, status & ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask)); } } int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_irqwait_t *irqwait = data; struct timeval now; int ret = 0; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_via_irq_t *cur_irq = dev_priv->via_irqs; int force_sequence; if (irqwait->request.irq >= dev_priv->num_irqs) { DRM_ERROR("Trying to wait on unknown irq %d\n", irqwait->request.irq); return -EINVAL; } cur_irq += irqwait->request.irq; switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: irqwait->request.sequence += atomic_read(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; default: return -EINVAL; } if (irqwait->request.type & VIA_IRQ_SIGNAL) { DRM_ERROR("Signals on Via IRQs not implemented yet.\n"); return -EINVAL; } force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE); ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence, &irqwait->request.sequence); do_gettimeofday(&now); irqwait->reply.tval_sec = now.tv_sec; irqwait->reply.tval_usec = now.tv_usec; return ret; }
gpl-2.0
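[Editorial note] The vblank handler above refreshes its timestamp only every 16th interrupt (vbl_received & 0x0F), so shifting the elapsed microseconds right by 4 divides by 16 and yields the average time per vblank. A small sketch of time_diff() and that averaging; the sample values assume a ~60 Hz display:

#include <stdio.h>
#include <sys/time.h>

/* same wrap handling as the driver's time_diff(): usec field only */
static unsigned time_diff_usec(struct timeval *now, struct timeval *then)
{
        return (now->tv_usec >= then->tv_usec) ?
                now->tv_usec - then->tv_usec :
                1000000 - (then->tv_usec - now->tv_usec);
}

int main(void)
{
        /* pretend 16 vblanks of a 60 Hz display elapsed: ~266667 us */
        struct timeval then = { .tv_sec = 10, .tv_usec = 100000 };
        struct timeval now  = { .tv_sec = 10, .tv_usec = 366667 };

        printf("usec per vblank: %u\n", time_diff_usec(&now, &then) >> 4);
        return 0;
}

This prints 16666, i.e. roughly the 16.7 ms frame period of a 60 Hz mode, which is the "US per vblank" figure the driver logs for video-sync purposes.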
dohclude/Chucky-LINARO-DragunBall
crypto/zlib.c
9992
9767
/* * Cryptographic API. * * Zlib algorithm * * Copyright 2008 Sony Corporation * * Based on deflate.c, which is * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * FIXME: deflate transforms will require up to a total of about 436k of kernel * memory on i386 (390k for compression, the rest for decompression), as the * current zlib kernel code uses a worst case pre-allocation system by default. * This needs to be fixed so that the amount of memory required is properly * related to the winbits and memlevel parameters. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/init.h> #include <linux/module.h> #include <linux/zlib.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/net.h> #include <crypto/internal/compress.h> #include <net/netlink.h> struct zlib_ctx { struct z_stream_s comp_stream; struct z_stream_s decomp_stream; int decomp_windowBits; }; static void zlib_comp_exit(struct zlib_ctx *ctx) { struct z_stream_s *stream = &ctx->comp_stream; if (stream->workspace) { zlib_deflateEnd(stream); vfree(stream->workspace); stream->workspace = NULL; } } static void zlib_decomp_exit(struct zlib_ctx *ctx) { struct z_stream_s *stream = &ctx->decomp_stream; if (stream->workspace) { zlib_inflateEnd(stream); vfree(stream->workspace); stream->workspace = NULL; } } static int zlib_init(struct crypto_tfm *tfm) { return 0; } static void zlib_exit(struct crypto_tfm *tfm) { struct zlib_ctx *ctx = crypto_tfm_ctx(tfm); zlib_comp_exit(ctx); zlib_decomp_exit(ctx); } static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params, unsigned int len) { struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &ctx->comp_stream; struct nlattr *tb[ZLIB_COMP_MAX + 1]; int window_bits, mem_level; size_t workspacesize; int ret; ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL); if (ret) return ret; zlib_comp_exit(ctx); window_bits = tb[ZLIB_COMP_WINDOWBITS] ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) : MAX_WBITS; mem_level = tb[ZLIB_COMP_MEMLEVEL] ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL]) : DEF_MEM_LEVEL; workspacesize = zlib_deflate_workspacesize(window_bits, mem_level); stream->workspace = vzalloc(workspacesize); if (!stream->workspace) return -ENOMEM; ret = zlib_deflateInit2(stream, tb[ZLIB_COMP_LEVEL] ? nla_get_u32(tb[ZLIB_COMP_LEVEL]) : Z_DEFAULT_COMPRESSION, tb[ZLIB_COMP_METHOD] ? nla_get_u32(tb[ZLIB_COMP_METHOD]) : Z_DEFLATED, window_bits, mem_level, tb[ZLIB_COMP_STRATEGY] ? 
nla_get_u32(tb[ZLIB_COMP_STRATEGY]) : Z_DEFAULT_STRATEGY); if (ret != Z_OK) { vfree(stream->workspace); stream->workspace = NULL; return -EINVAL; } return 0; } static int zlib_compress_init(struct crypto_pcomp *tfm) { int ret; struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &dctx->comp_stream; ret = zlib_deflateReset(stream); if (ret != Z_OK) return -EINVAL; return 0; } static int zlib_compress_update(struct crypto_pcomp *tfm, struct comp_request *req) { int ret; struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &dctx->comp_stream; pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); stream->next_in = req->next_in; stream->avail_in = req->avail_in; stream->next_out = req->next_out; stream->avail_out = req->avail_out; ret = zlib_deflate(stream, Z_NO_FLUSH); switch (ret) { case Z_OK: break; case Z_BUF_ERROR: pr_debug("zlib_deflate could not make progress\n"); return -EAGAIN; default: pr_debug("zlib_deflate failed %d\n", ret); return -EINVAL; } ret = req->avail_out - stream->avail_out; pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", stream->avail_in, stream->avail_out, req->avail_in - stream->avail_in, ret); req->next_in = stream->next_in; req->avail_in = stream->avail_in; req->next_out = stream->next_out; req->avail_out = stream->avail_out; return ret; } static int zlib_compress_final(struct crypto_pcomp *tfm, struct comp_request *req) { int ret; struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &dctx->comp_stream; pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); stream->next_in = req->next_in; stream->avail_in = req->avail_in; stream->next_out = req->next_out; stream->avail_out = req->avail_out; ret = zlib_deflate(stream, Z_FINISH); if (ret != Z_STREAM_END) { pr_debug("zlib_deflate failed %d\n", ret); return -EINVAL; } ret = req->avail_out - stream->avail_out; pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", stream->avail_in, stream->avail_out, req->avail_in - stream->avail_in, ret); req->next_in = stream->next_in; req->avail_in = stream->avail_in; req->next_out = stream->next_out; req->avail_out = stream->avail_out; return ret; } static int zlib_decompress_setup(struct crypto_pcomp *tfm, void *params, unsigned int len) { struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &ctx->decomp_stream; struct nlattr *tb[ZLIB_DECOMP_MAX + 1]; int ret = 0; ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL); if (ret) return ret; zlib_decomp_exit(ctx); ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS] ? 
nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS]) : DEF_WBITS; stream->workspace = vzalloc(zlib_inflate_workspacesize()); if (!stream->workspace) return -ENOMEM; ret = zlib_inflateInit2(stream, ctx->decomp_windowBits); if (ret != Z_OK) { vfree(stream->workspace); stream->workspace = NULL; return -EINVAL; } return 0; } static int zlib_decompress_init(struct crypto_pcomp *tfm) { int ret; struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &dctx->decomp_stream; ret = zlib_inflateReset(stream); if (ret != Z_OK) return -EINVAL; return 0; } static int zlib_decompress_update(struct crypto_pcomp *tfm, struct comp_request *req) { int ret; struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &dctx->decomp_stream; pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); stream->next_in = req->next_in; stream->avail_in = req->avail_in; stream->next_out = req->next_out; stream->avail_out = req->avail_out; ret = zlib_inflate(stream, Z_SYNC_FLUSH); switch (ret) { case Z_OK: case Z_STREAM_END: break; case Z_BUF_ERROR: pr_debug("zlib_inflate could not make progress\n"); return -EAGAIN; default: pr_debug("zlib_inflate failed %d\n", ret); return -EINVAL; } ret = req->avail_out - stream->avail_out; pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", stream->avail_in, stream->avail_out, req->avail_in - stream->avail_in, ret); req->next_in = stream->next_in; req->avail_in = stream->avail_in; req->next_out = stream->next_out; req->avail_out = stream->avail_out; return ret; } static int zlib_decompress_final(struct crypto_pcomp *tfm, struct comp_request *req) { int ret; struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); struct z_stream_s *stream = &dctx->decomp_stream; pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); stream->next_in = req->next_in; stream->avail_in = req->avail_in; stream->next_out = req->next_out; stream->avail_out = req->avail_out; if (dctx->decomp_windowBits < 0) { ret = zlib_inflate(stream, Z_SYNC_FLUSH); /* * Work around a bug in zlib, which sometimes wants to taste an * extra byte when being used in the (undocumented) raw deflate * mode. (From USAGI). 
*/ if (ret == Z_OK && !stream->avail_in && stream->avail_out) { const void *saved_next_in = stream->next_in; u8 zerostuff = 0; stream->next_in = &zerostuff; stream->avail_in = 1; ret = zlib_inflate(stream, Z_FINISH); stream->next_in = saved_next_in; stream->avail_in = 0; } } else ret = zlib_inflate(stream, Z_FINISH); if (ret != Z_STREAM_END) { pr_debug("zlib_inflate failed %d\n", ret); return -EINVAL; } ret = req->avail_out - stream->avail_out; pr_debug("avail_in %u, avail_out %u (consumed %u, produced %u)\n", stream->avail_in, stream->avail_out, req->avail_in - stream->avail_in, ret); req->next_in = stream->next_in; req->avail_in = stream->avail_in; req->next_out = stream->next_out; req->avail_out = stream->avail_out; return ret; } static struct pcomp_alg zlib_alg = { .compress_setup = zlib_compress_setup, .compress_init = zlib_compress_init, .compress_update = zlib_compress_update, .compress_final = zlib_compress_final, .decompress_setup = zlib_decompress_setup, .decompress_init = zlib_decompress_init, .decompress_update = zlib_decompress_update, .decompress_final = zlib_decompress_final, .base = { .cra_name = "zlib", .cra_flags = CRYPTO_ALG_TYPE_PCOMPRESS, .cra_ctxsize = sizeof(struct zlib_ctx), .cra_module = THIS_MODULE, .cra_init = zlib_init, .cra_exit = zlib_exit, } }; static int __init zlib_mod_init(void) { return crypto_register_pcomp(&zlib_alg); } static void __exit zlib_mod_fini(void) { crypto_unregister_pcomp(&zlib_alg); } module_init(zlib_mod_init); module_exit(zlib_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Zlib Compression Algorithm"); MODULE_AUTHOR("Sony Corporation");
gpl-2.0
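[Editorial note] The compress/decompress update functions above are thin wrappers around zlib's streaming contract: point next_in/avail_in at unconsumed input, next_out/avail_out at free output space, and call deflate()/inflate() until done. A minimal userspace sketch against the ordinary zlib library (link with -lz); this is a one-shot Z_FINISH call, whereas the driver loops and uses Z_NO_FLUSH for intermediate updates:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
        const char *msg = "hello hello hello hello";
        unsigned char out[128];
        z_stream s;

        memset(&s, 0, sizeof(s));       /* zalloc/zfree/opaque = Z_NULL */
        if (deflateInit(&s, Z_DEFAULT_COMPRESSION) != Z_OK)
                return 1;

        s.next_in   = (unsigned char *)msg;
        s.avail_in  = strlen(msg);
        s.next_out  = out;
        s.avail_out = sizeof(out);

        /* enough output space, so Z_FINISH completes in one call */
        if (deflate(&s, Z_FINISH) != Z_STREAM_END) {
                deflateEnd(&s);
                return 1;
        }
        printf("compressed %zu -> %lu bytes\n", strlen(msg), s.total_out);
        deflateEnd(&s);
        return 0;
}

The driver's bookkeeping (consumed = avail_in before minus after, produced = avail_out before minus after) falls straight out of the same fields.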
PKUCloud/linux-3.11-cloud
samples/kobject/kobject-example.c
12040
3323
/* * Sample kobject implementation * * Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2007 Novell Inc. * * Released under the GPL version 2 only. * */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/init.h> /* * This module shows how to create a simple subdirectory in sysfs called * /sys/kernel/kobject-example In that directory, 3 files are created: * "foo", "baz", and "bar". If an integer is written to these files, it can be * later read out of it. */ static int foo; static int baz; static int bar; /* * The "foo" file where a static variable is read from and written to. */ static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", foo); } static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { sscanf(buf, "%du", &foo); return count; } static struct kobj_attribute foo_attribute = __ATTR(foo, 0666, foo_show, foo_store); /* * More complex function where we determine which variable is being accessed by * looking at the attribute for the "baz" and "bar" files. */ static ssize_t b_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int var; if (strcmp(attr->attr.name, "baz") == 0) var = baz; else var = bar; return sprintf(buf, "%d\n", var); } static ssize_t b_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int var; sscanf(buf, "%du", &var); if (strcmp(attr->attr.name, "baz") == 0) baz = var; else bar = var; return count; } static struct kobj_attribute baz_attribute = __ATTR(baz, 0666, b_show, b_store); static struct kobj_attribute bar_attribute = __ATTR(bar, 0666, b_show, b_store); /* * Create a group of attributes so that we can create and destroy them all * at once. */ static struct attribute *attrs[] = { &foo_attribute.attr, &baz_attribute.attr, &bar_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; /* * An unnamed attribute group will put all of the attributes directly in * the kobject directory. If we specify a name, a subdirectory will be * created for the attributes with the directory being the name of the * attribute group. */ static struct attribute_group attr_group = { .attrs = attrs, }; static struct kobject *example_kobj; static int __init example_init(void) { int retval; /* * Create a simple kobject with the name of "kobject_example", * located under /sys/kernel/ * * As this is a simple directory, no uevent will be sent to * userspace. That is why this function should not be used for * any type of dynamic kobjects, where the name and number are * not known ahead of time. */ example_kobj = kobject_create_and_add("kobject_example", kernel_kobj); if (!example_kobj) return -ENOMEM; /* Create the files associated with this kobject */ retval = sysfs_create_group(example_kobj, &attr_group); if (retval) kobject_put(example_kobj); return retval; } static void __exit example_exit(void) { kobject_put(example_kobj); } module_init(example_init); module_exit(example_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Greg Kroah-Hartman <greg@kroah.com>");
gpl-2.0
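[Editorial note] The "baz"/"bar" files above share one show and one store handler that pick the backing variable by inspecting attr->attr.name, instead of duplicating a handler per attribute. A userspace sketch of that dispatch; struct fake_attr is an illustrative stand-in for struct kobj_attribute:

#include <stdio.h>
#include <string.h>

static int baz, bar;

struct fake_attr {
        const char *name;
};

static int b_show(const struct fake_attr *attr, char *buf, size_t len)
{
        int var = strcmp(attr->name, "baz") == 0 ? baz : bar;

        return snprintf(buf, len, "%d\n", var);
}

static void b_store(const struct fake_attr *attr, const char *buf)
{
        int var = 0;

        sscanf(buf, "%d", &var);        /* parse the written value */
        if (strcmp(attr->name, "baz") == 0)
                baz = var;
        else
                bar = var;
}

int main(void)
{
        struct fake_attr a = { "baz" }, b = { "bar" };
        char out[16];

        b_store(&a, "42\n");
        b_store(&b, "7\n");
        b_show(&a, out, sizeof(out));
        printf("baz: %s", out);
        b_show(&b, out, sizeof(out));
        printf("bar: %s", out);
        return 0;
}

The payoff grows with the number of attributes: one handler pair can serve an entire attribute_group, exactly as the sample's attrs[] array suggests.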
Helios747/dolphin
Source/Core/DolphinQt/SearchBar.cpp
9
1088
// Copyright 2018 Dolphin Emulator Project // Licensed under GPLv2+ // Refer to the license.txt file included. #include "DolphinQt/SearchBar.h" #include <QHBoxLayout> #include <QLineEdit> #include <QPushButton> SearchBar::SearchBar(QWidget* parent) : QWidget(parent) { CreateWidgets(); ConnectWidgets(); setFixedHeight(32); setHidden(true); } void SearchBar::CreateWidgets() { m_search_edit = new QLineEdit; m_close_button = new QPushButton(tr("Close")); m_search_edit->setPlaceholderText(tr("Search games...")); auto* layout = new QHBoxLayout; layout->addWidget(m_search_edit); layout->addWidget(m_close_button); layout->setSizeConstraint(QLayout::SetMinAndMaxSize); setLayout(layout); } void SearchBar::Toggle() { m_search_edit->clear(); setHidden(isVisible()); if (isVisible()) m_search_edit->setFocus(); else m_search_edit->clearFocus(); } void SearchBar::ConnectWidgets() { connect(m_search_edit, &QLineEdit::textChanged, this, &SearchBar::Search); connect(m_close_button, &QPushButton::pressed, this, &SearchBar::Toggle); }
gpl-2.0
YouDiSN/OpenJDK-Research
jdk9/jdk/src/java.base/unix/native/libnet/SocketOutputStream.c
9
4088
/* * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ #include <errno.h> #include <stdlib.h> #include <string.h> #include "net_util.h" #include "java_net_SocketOutputStream.h" #define min(a, b) ((a) < (b) ? (a) : (b)) /* * SocketOutputStream */ static jfieldID IO_fd_fdID; /* * Class: java_net_SocketOutputStream * Method: init * Signature: ()V */ JNIEXPORT void JNICALL Java_java_net_SocketOutputStream_init(JNIEnv *env, jclass cls) { IO_fd_fdID = NET_GetFileDescriptorID(env); } /* * Class: java_net_SocketOutputStream * Method: socketWrite0 * Signature: (Ljava/io/FileDescriptor;[BII)V */ JNIEXPORT void JNICALL Java_java_net_SocketOutputStream_socketWrite0(JNIEnv *env, jobject this, jobject fdObj, jbyteArray data, jint off, jint len) { char *bufP; char BUF[MAX_BUFFER_LEN]; int buflen; int fd; if (IS_NULL(fdObj)) { JNU_ThrowByName(env, "java/net/SocketException", "Socket closed"); return; } else { fd = (*env)->GetIntField(env, fdObj, IO_fd_fdID); /* Bug 4086704 - If the Socket associated with this file descriptor * was closed (sysCloseFD), the file descriptor is set to -1. */ if (fd == -1) { JNU_ThrowByName(env, "java/net/SocketException", "Socket closed"); return; } } if (len <= MAX_BUFFER_LEN) { bufP = BUF; buflen = MAX_BUFFER_LEN; } else { buflen = min(MAX_HEAP_BUFFER_LEN, len); bufP = (char *)malloc((size_t)buflen); /* if heap exhausted resort to stack buffer */ if (bufP == NULL) { bufP = BUF; buflen = MAX_BUFFER_LEN; } } while(len > 0) { int loff = 0; int chunkLen = min(buflen, len); int llen = chunkLen; (*env)->GetByteArrayRegion(env, data, off, chunkLen, (jbyte *)bufP); if ((*env)->ExceptionCheck(env)) { break; } else { while(llen > 0) { int n = NET_Send(fd, bufP + loff, llen, 0); if (n > 0) { llen -= n; loff += n; continue; } if (errno == ECONNRESET) { JNU_ThrowByName(env, "sun/net/ConnectionResetException", "Connection reset"); } else { JNU_ThrowByNameWithMessageAndLastError (env, "java/net/SocketException", "Write failed"); } if (bufP != BUF) { free(bufP); } return; } len -= chunkLen; off += chunkLen; } } if (bufP != BUF) { free(bufP); } }
gpl-2.0
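[Editorial note] socketWrite0() above copies the Java byte[] into a bounded native buffer chunk by chunk (a stack buffer for small writes, malloc for large ones, falling back to the stack if the heap is exhausted), and an inner loop retries until NET_Send() has consumed the whole chunk. A sketch of those two loops using plain write(); write_all() is an illustrative stand-in for the NET_Send() retry loop and writes to any fd, not just sockets:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define CHUNK 8         /* stands in for MAX_BUFFER_LEN */
#define min(a, b) ((a) < (b) ? (a) : (b))

static int write_all(int fd, const char *buf, size_t len)
{
        while (len > 0) {
                ssize_t n = write(fd, buf, len);

                if (n < 0)
                        return -1;      /* caller would raise SocketException */
                buf += n;
                len -= n;               /* loop again on a short write */
        }
        return 0;
}

int main(void)
{
        const char *data = "chunked write demo\n";
        size_t off = 0, len = strlen(data);

        while (len > 0) {
                size_t chunk = min((size_t)CHUNK, len);

                if (write_all(STDOUT_FILENO, data + off, chunk) < 0)
                        return 1;
                off += chunk;
                len -= chunk;
        }
        return 0;
}

Bounding the native buffer is the point: it caps native memory per write regardless of how large the Java array is, at the cost of one GetByteArrayRegion() copy per chunk.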
sakuramilk/linux-2.6.32.y
drivers/isdn/hardware/mISDN/hfcpci.c
521
65877
/* * * hfcpci.c low level driver for CCD's hfc-pci based cards * * Author Werner Cornelius (werner@isdn4linux.de) * based on existing driver for CCD hfc ISA cards * type approval valid for HFC-S PCI A based card * * Copyright 1999 by Werner Cornelius (werner@isdn-development.de) * Copyright 2008 by Karsten Keil <kkeil@novell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Module options: * * debug: * NOTE: only one poll value must be given for all cards * See hfc_pci.h for debug flags. * * poll: * NOTE: only one poll value must be given for all cards * Give the number of samples for each fifo process. * By default 128 is used. Decrease to reduce delay, increase to * reduce cpu load. If unsure, don't mess with it! * A value of 128 will use controller's interrupt. Other values will * use kernel timer, because the controller will not allow lower values * than 128. * Also note that the value depends on the kernel timer frequency. * If kernel uses a frequency of 1000 Hz, steps of 8 samples are possible. * If the kernel uses 100 Hz, steps of 80 samples are possible. * If the kernel uses 300 Hz, steps of about 26 samples are possible. * */ #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/mISDNhw.h> #include "hfc_pci.h" static const char *hfcpci_revision = "2.0"; static int HFC_cnt; static uint debug; static uint poll, tics; static struct timer_list hfc_tl; static unsigned long hfc_jiffies; MODULE_AUTHOR("Karsten Keil"); MODULE_LICENSE("GPL"); module_param(debug, uint, S_IRUGO | S_IWUSR); module_param(poll, uint, S_IRUGO | S_IWUSR); enum { HFC_CCD_2BD0, HFC_CCD_B000, HFC_CCD_B006, HFC_CCD_B007, HFC_CCD_B008, HFC_CCD_B009, HFC_CCD_B00A, HFC_CCD_B00B, HFC_CCD_B00C, HFC_CCD_B100, HFC_CCD_B700, HFC_CCD_B701, HFC_ASUS_0675, HFC_BERKOM_A1T, HFC_BERKOM_TCONCEPT, HFC_ANIGMA_MC145575, HFC_ZOLTRIX_2BD0, HFC_DIGI_DF_M_IOM2_E, HFC_DIGI_DF_M_E, HFC_DIGI_DF_M_IOM2_A, HFC_DIGI_DF_M_A, HFC_ABOCOM_2BD1, HFC_SITECOM_DC105V2, }; struct hfcPCI_hw { unsigned char cirm; unsigned char ctmt; unsigned char clkdel; unsigned char states; unsigned char conn; unsigned char mst_m; unsigned char int_m1; unsigned char int_m2; unsigned char sctrl; unsigned char sctrl_r; unsigned char sctrl_e; unsigned char trm; unsigned char fifo_en; unsigned char bswapped; unsigned char protocol; int nt_timer; unsigned char __iomem *pci_io; /* start of PCI IO memory */ dma_addr_t dmahandle; void *fifos; /* FIFO memory */ int last_bfifo_cnt[2]; /* marker saving last b-fifo frame count */ struct timer_list timer; }; #define HFC_CFG_MASTER 1 #define HFC_CFG_SLAVE 2 #define HFC_CFG_PCM 3 #define HFC_CFG_2HFC 4 #define HFC_CFG_SLAVEHFC 5 #define HFC_CFG_NEG_F0 6 #define HFC_CFG_SW_DD_DU 7 #define FLG_HFC_TIMER_T1 16 #define FLG_HFC_TIMER_T3 17 #define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */ #define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */ #define CLKDEL_TE 
0x0e /* CLKDEL in TE mode */ #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */ struct hfc_pci { u_char subtype; u_char chanlimit; u_char initdone; u_long cfg; u_int irq; u_int irqcnt; struct pci_dev *pdev; struct hfcPCI_hw hw; spinlock_t lock; /* card lock */ struct dchannel dch; struct bchannel bch[2]; }; /* Interface functions */ static void enable_hwirq(struct hfc_pci *hc) { hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE; Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2); } static void disable_hwirq(struct hfc_pci *hc) { hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE); Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2); } /* * free hardware resources used by driver */ static void release_io_hfcpci(struct hfc_pci *hc) { /* disable memory mapped ports + busmaster */ pci_write_config_word(hc->pdev, PCI_COMMAND, 0); del_timer(&hc->hw.timer); pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle); iounmap(hc->hw.pci_io); } /* * set mode (NT or TE) */ static void hfcpci_setmode(struct hfc_pci *hc) { if (hc->hw.protocol == ISDN_P_NT_S0) { hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */ hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */ hc->hw.states = 1; /* G1 */ } else { hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */ hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */ hc->hw.states = 2; /* F2 */ } Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel); Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states); udelay(10); Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */ Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl); } /* * function called to reset the HFC PCI chip. A complete software reset of chip * and fifos is done. */ static void reset_hfcpci(struct hfc_pci *hc) { u_char val; int cnt = 0; printk(KERN_DEBUG "reset_hfcpci: entered\n"); val = Read_hfc(hc, HFCPCI_CHIP_ID); printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val); /* enable memory mapped ports, disable busmaster */ pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO); disable_hwirq(hc); /* enable memory ports + busmaster */ pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO + PCI_ENA_MASTER); val = Read_hfc(hc, HFCPCI_STATUS); printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val); hc->hw.cirm = HFCPCI_RESET; /* Reset On */ Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm); set_current_state(TASK_UNINTERRUPTIBLE); mdelay(10); /* Timeout 10ms */ hc->hw.cirm = 0; /* Reset Off */ Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm); val = Read_hfc(hc, HFCPCI_STATUS); printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val); while (cnt < 50000) { /* max 50000 us */ udelay(5); cnt += 5; val = Read_hfc(hc, HFCPCI_STATUS); if (!(val & 2)) break; } printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt); hc->hw.fifo_en = 0x30; /* only D fifos enabled */ hc->hw.bswapped = 0; /* no exchange */ hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER; hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect , threshold */ hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet ! 
*/ hc->hw.sctrl_r = 0; hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */ hc->hw.mst_m = 0; if (test_bit(HFC_CFG_MASTER, &hc->cfg)) hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */ if (test_bit(HFC_CFG_NEG_F0, &hc->cfg)) hc->hw.mst_m |= HFCPCI_F0_NEGATIV; Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); Write_hfc(hc, HFCPCI_TRM, hc->hw.trm); Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e); Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt); hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC | HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); /* Clear already pending ints */ val = Read_hfc(hc, HFCPCI_INT_S1); /* set NT/TE mode */ hfcpci_setmode(hc); Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r); /* * Init GCI/IOM2 in master mode * Slots 0 and 1 are set for B-chan 1 and 2 * D- and monitor/CI channel are not enabled * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC * STIO2 is used as data input, B1+B2 from IOM->ST * ST B-channel send disabled -> continous 1s * The IOM slots are always enabled */ if (test_bit(HFC_CFG_PCM, &hc->cfg)) { /* set data flow directions: connect B1,B2: HFC to/from PCM */ hc->hw.conn = 0x09; } else { hc->hw.conn = 0x36; /* set data flow directions */ if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) { Write_hfc(hc, HFCPCI_B1_SSL, 0xC0); Write_hfc(hc, HFCPCI_B2_SSL, 0xC1); Write_hfc(hc, HFCPCI_B1_RSL, 0xC0); Write_hfc(hc, HFCPCI_B2_RSL, 0xC1); } else { Write_hfc(hc, HFCPCI_B1_SSL, 0x80); Write_hfc(hc, HFCPCI_B2_SSL, 0x81); Write_hfc(hc, HFCPCI_B1_RSL, 0x80); Write_hfc(hc, HFCPCI_B2_RSL, 0x81); } } Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); val = Read_hfc(hc, HFCPCI_INT_S2); } /* * Timer function called when kernel timer expires */ static void hfcpci_Timer(struct hfc_pci *hc) { hc->hw.timer.expires = jiffies + 75; /* WD RESET */ /* * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80); * add_timer(&hc->hw.timer); */ } /* * select a b-channel entry matching and active */ static struct bchannel * Sel_BCS(struct hfc_pci *hc, int channel) { if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) && (hc->bch[0].nr & channel)) return &hc->bch[0]; else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) && (hc->bch[1].nr & channel)) return &hc->bch[1]; else return NULL; } /* * clear the desired B-channel rx fifo */ static void hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo) { u_char fifo_state; struct bzfifo *bzr; if (fifo) { bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2; fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX; } else { bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1; fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX; } if (fifo_state) hc->hw.fifo_en ^= fifo_state; Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); hc->hw.last_bfifo_cnt[fifo] = 0; bzr->f1 = MAX_B_FRAMES; bzr->f2 = bzr->f1; /* init F pointers to remain constant */ bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1); bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16( le16_to_cpu(bzr->za[MAX_B_FRAMES].z1)); if (fifo_state) hc->hw.fifo_en |= fifo_state; Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); } /* * clear the desired B-channel tx fifo */ static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo) { u_char fifo_state; struct bzfifo *bzt; if (fifo) { bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX; } else { bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1; fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX; } if (fifo_state) 
hc->hw.fifo_en ^= fifo_state; Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) " "z1(%x) z2(%x) state(%x)\n", fifo, bzt->f1, bzt->f2, le16_to_cpu(bzt->za[MAX_B_FRAMES].z1), le16_to_cpu(bzt->za[MAX_B_FRAMES].z2), fifo_state); bzt->f2 = MAX_B_FRAMES; bzt->f1 = bzt->f2; /* init F pointers to remain constant */ bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1); bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2); if (fifo_state) hc->hw.fifo_en |= fifo_state; Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n", fifo, bzt->f1, bzt->f2, le16_to_cpu(bzt->za[MAX_B_FRAMES].z1), le16_to_cpu(bzt->za[MAX_B_FRAMES].z2)); } /* * read a complete B-frame out of the buffer */ static void hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz, u_char *bdata, int count) { u_char *ptr, *ptr1, new_f2; int total, maxlen, new_z2; struct zt *zp; if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) printk(KERN_DEBUG "hfcpci_empty_fifo\n"); zp = &bz->za[bz->f2]; /* point to Z-Regs */ new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z2 -= B_FIFO_SIZE; /* buffer wrap */ new_f2 = (bz->f2 + 1) & MAX_B_FRAMES; if ((count > MAX_DATA_SIZE + 3) || (count < 4) || (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) { if (bch->debug & DEBUG_HW) printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet " "invalid length %d or crc\n", count); #ifdef ERROR_STATISTIC bch->err_inv++; #endif bz->za[new_f2].z2 = cpu_to_le16(new_z2); bz->f2 = new_f2; /* next buffer */ } else { bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC); if (!bch->rx_skb) { printk(KERN_WARNING "HFCPCI: receive out of memory\n"); return; } total = count; count -= 3; ptr = skb_put(bch->rx_skb, count); if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL) maxlen = count; /* complete transfer */ else maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(zp->z2); /* maximum */ ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL); /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ count -= maxlen; if (count) { /* rest remaining */ ptr += maxlen; ptr1 = bdata; /* start of buffer */ memcpy(ptr, ptr1, count); /* rest */ } bz->za[new_f2].z2 = cpu_to_le16(new_z2); bz->f2 = new_f2; /* next buffer */ recv_Bchannel(bch, MISDN_ID_ANY); } } /* * D-channel receive procedure */ static int receive_dmsg(struct hfc_pci *hc) { struct dchannel *dch = &hc->dch; int maxlen; int rcnt, total; int count = 5; u_char *ptr, *ptr1; struct dfifo *df; struct zt *zp; df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx; while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) { zp = &df->za[df->f2 & D_FREG_MASK]; rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2); if (rcnt < 0) rcnt += D_FIFO_SIZE; rcnt++; if (dch->debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n", df->f1, df->f2, le16_to_cpu(zp->z1), le16_to_cpu(zp->z2), rcnt); if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) || (df->data[le16_to_cpu(zp->z1)])) { if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "empty_fifo hfcpci paket inv. 
len " "%d or crc %d\n", rcnt, df->data[le16_to_cpu(zp->z1)]); #ifdef ERROR_STATISTIC cs->err_rx++; #endif df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */ df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) & (D_FIFO_SIZE - 1)); } else { dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC); if (!dch->rx_skb) { printk(KERN_WARNING "HFC-PCI: D receive out of memory\n"); break; } total = rcnt; rcnt -= 3; ptr = skb_put(dch->rx_skb, rcnt); if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE) maxlen = rcnt; /* complete transfer */ else maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2); /* maximum */ ptr1 = df->data + le16_to_cpu(zp->z2); /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ rcnt -= maxlen; if (rcnt) { /* rest remaining */ ptr += maxlen; ptr1 = df->data; /* start of buffer */ memcpy(ptr, ptr1, rcnt); /* rest */ } df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | (MAX_D_FRAMES + 1); /* next buffer */ df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16(( le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1)); recv_Dchannel(dch); } } return 1; } /* * check for transparent receive data and read max one 'poll' size if avail */ static void hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *rxbz, struct bzfifo *txbz, u_char *bdata) { __le16 *z1r, *z2r, *z1t, *z2t; int new_z2, fcnt_rx, fcnt_tx, maxlen; u_char *ptr, *ptr1; z1r = &rxbz->za[MAX_B_FRAMES].z1; /* pointer to z reg */ z2r = z1r + 1; z1t = &txbz->za[MAX_B_FRAMES].z1; z2t = z1t + 1; fcnt_rx = le16_to_cpu(*z1r) - le16_to_cpu(*z2r); if (!fcnt_rx) return; /* no data avail */ if (fcnt_rx <= 0) fcnt_rx += B_FIFO_SIZE; /* bytes actually buffered */ new_z2 = le16_to_cpu(*z2r) + fcnt_rx; /* new position in fifo */ if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z2 -= B_FIFO_SIZE; /* buffer wrap */ if (fcnt_rx > MAX_DATA_SIZE) { /* flush, if oversized */ *z2r = cpu_to_le16(new_z2); /* new position */ return; } fcnt_tx = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); if (fcnt_tx <= 0) fcnt_tx += B_FIFO_SIZE; /* fcnt_tx contains available bytes in tx-fifo */ fcnt_tx = B_FIFO_SIZE - fcnt_tx; /* remaining bytes to send (bytes in tx-fifo) */ bch->rx_skb = mI_alloc_skb(fcnt_rx, GFP_ATOMIC); if (bch->rx_skb) { ptr = skb_put(bch->rx_skb, fcnt_rx); if (le16_to_cpu(*z2r) + fcnt_rx <= B_FIFO_SIZE + B_SUB_VAL) maxlen = fcnt_rx; /* complete transfer */ else maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r); /* maximum */ ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL); /* start of data */ memcpy(ptr, ptr1, maxlen); /* copy data */ fcnt_rx -= maxlen; if (fcnt_rx) { /* rest remaining */ ptr += maxlen; ptr1 = bdata; /* start of buffer */ memcpy(ptr, ptr1, fcnt_rx); /* rest */ } recv_Bchannel(bch, fcnt_tx); /* bch, id */ } else printk(KERN_WARNING "HFCPCI: receive out of memory\n"); *z2r = cpu_to_le16(new_z2); /* new position */ } /* * B-channel main receive routine */ static void main_rec_hfcpci(struct bchannel *bch) { struct hfc_pci *hc = bch->hw; int rcnt, real_fifo; int receive = 0, count = 5; struct bzfifo *txbz, *rxbz; u_char *bdata; struct zt *zp; if ((bch->nr & 2) && (!hc->hw.bswapped)) { rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2; txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2; real_fifo = 1; } else { rxbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1; txbz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1; bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1; real_fifo = 0; } Begin: count--; if 
(rxbz->f1 != rxbz->f2) { if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n", bch->nr, rxbz->f1, rxbz->f2); zp = &rxbz->za[rxbz->f2]; rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2); if (rcnt < 0) rcnt += B_FIFO_SIZE; rcnt++; if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n", bch->nr, le16_to_cpu(zp->z1), le16_to_cpu(zp->z2), rcnt); hfcpci_empty_bfifo(bch, rxbz, bdata, rcnt); rcnt = rxbz->f1 - rxbz->f2; if (rcnt < 0) rcnt += MAX_B_FRAMES + 1; if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) { rcnt = 0; hfcpci_clear_fifo_rx(hc, real_fifo); } hc->hw.last_bfifo_cnt[real_fifo] = rcnt; if (rcnt > 1) receive = 1; else receive = 0; } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { hfcpci_empty_fifo_trans(bch, rxbz, txbz, bdata); return; } else receive = 0; if (count && receive) goto Begin; } /* * D-channel send routine */ static void hfcpci_fill_dfifo(struct hfc_pci *hc) { struct dchannel *dch = &hc->dch; int fcnt; int count, new_z1, maxlen; struct dfifo *df; u_char *src, *dst, new_f1; if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO)) printk(KERN_DEBUG "%s\n", __func__); if (!dch->tx_skb) return; count = dch->tx_skb->len - dch->tx_idx; if (count <= 0) return; df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx; if (dch->debug & DEBUG_HW_DFIFO) printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__, df->f1, df->f2, le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1)); fcnt = df->f1 - df->f2; /* frame count actually buffered */ if (fcnt < 0) fcnt += (MAX_D_FRAMES + 1); /* if wrap around */ if (fcnt > (MAX_D_FRAMES - 1)) { if (dch->debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "hfcpci_fill_Dfifo more as 14 frames\n"); #ifdef ERROR_STATISTIC cs->err_tx++; #endif return; } /* now determine free bytes in FIFO buffer */ maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1; if (maxlen <= 0) maxlen += D_FIFO_SIZE; /* count now contains available bytes */ if (dch->debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n", count, maxlen); if (count > maxlen) { if (dch->debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n"); return; } new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) & (D_FIFO_SIZE - 1); new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1); src = dch->tx_skb->data + dch->tx_idx; /* source pointer */ dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1); maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1); /* end fifo */ if (maxlen > count) maxlen = count; /* limit size */ memcpy(dst, src, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = df->data; /* start of buffer */ src += maxlen; /* new position */ memcpy(dst, src, count); } df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1); /* for next buffer */ df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1); /* new pos actual buffer */ df->f1 = new_f1; /* next frame */ dch->tx_idx = dch->tx_skb->len; } /* * B-channel send routine */ static void hfcpci_fill_fifo(struct bchannel *bch) { struct hfc_pci *hc = bch->hw; int maxlen, fcnt; int count, new_z1; struct bzfifo *bz; u_char *bdata; u_char new_f1, *src, *dst; __le16 *z1t, *z2t; if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) printk(KERN_DEBUG "%s\n", __func__); if ((!bch->tx_skb) || bch->tx_skb->len <= 0) return; count = bch->tx_skb->len - bch->tx_idx; if ((bch->nr & 2) && 
(!hc->hw.bswapped)) { bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2; bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2; } else { bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1; bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1; } if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { z1t = &bz->za[MAX_B_FRAMES].z1; z2t = z1t + 1; if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) " "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count, le16_to_cpu(*z1t), le16_to_cpu(*z2t)); fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t); if (fcnt <= 0) fcnt += B_FIFO_SIZE; /* fcnt contains available bytes in fifo */ fcnt = B_FIFO_SIZE - fcnt; /* remaining bytes to send (bytes in fifo) */ /* "fill fifo if empty" feature */ if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) { /* printk(KERN_DEBUG "%s: buffer empty, so we have " "underrun\n", __func__); */ /* fill buffer, to prevent future underrun */ count = HFCPCI_FILLEMPTY; new_z1 = le16_to_cpu(*z1t) + count; /* new buffer Position */ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z1 -= B_FIFO_SIZE; /* buffer wrap */ dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL); maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t); /* end of fifo */ if (bch->debug & DEBUG_HW_BFIFO) printk(KERN_DEBUG "hfcpci_FFt fillempty " "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n", fcnt, maxlen, new_z1, dst); fcnt += count; if (maxlen > count) maxlen = count; /* limit size */ memset(dst, 0x2a, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = bdata; /* start of buffer */ memset(dst, 0x2a, count); } *z1t = cpu_to_le16(new_z1); /* now send data */ } next_t_frame: count = bch->tx_skb->len - bch->tx_idx; /* maximum fill shall be poll*2 */ if (count > (poll << 1) - fcnt) count = (poll << 1) - fcnt; if (count <= 0) return; /* data is suitable for fifo */ new_z1 = le16_to_cpu(*z1t) + count; /* new buffer Position */ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z1 -= B_FIFO_SIZE; /* buffer wrap */ src = bch->tx_skb->data + bch->tx_idx; /* source pointer */ dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL); maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t); /* end of fifo */ if (bch->debug & DEBUG_HW_BFIFO) printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) " "maxl(%d) nz1(%x) dst(%p)\n", fcnt, maxlen, new_z1, dst); fcnt += count; bch->tx_idx += count; if (maxlen > count) maxlen = count; /* limit size */ memcpy(dst, src, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = bdata; /* start of buffer */ src += maxlen; /* new position */ memcpy(dst, src, count); } *z1t = cpu_to_le16(new_z1); /* now send data */ if (bch->tx_idx < bch->tx_skb->len) return; /* send confirm, on trans, free on hdlc. 
*/ if (test_bit(FLG_TRANSPARENT, &bch->Flags)) confirm_Bsend(bch); dev_kfree_skb(bch->tx_skb); if (get_next_bframe(bch)) goto next_t_frame; return; } if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n", __func__, bch->nr, bz->f1, bz->f2, bz->za[bz->f1].z1); fcnt = bz->f1 - bz->f2; /* frame count actually buffered */ if (fcnt < 0) fcnt += (MAX_B_FRAMES + 1); /* if wrap around */ if (fcnt > (MAX_B_FRAMES - 1)) { if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci_fill_Bfifo more as 14 frames\n"); return; } /* now determine free bytes in FIFO buffer */ maxlen = le16_to_cpu(bz->za[bz->f2].z2) - le16_to_cpu(bz->za[bz->f1].z1) - 1; if (maxlen <= 0) maxlen += B_FIFO_SIZE; /* count now contains available bytes */ if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n", bch->nr, count, maxlen); if (maxlen < count) { if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n"); return; } new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count; /* new buffer Position */ if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) new_z1 -= B_FIFO_SIZE; /* buffer wrap */ new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES); src = bch->tx_skb->data + bch->tx_idx; /* source pointer */ dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL); maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1); /* end fifo */ if (maxlen > count) maxlen = count; /* limit size */ memcpy(dst, src, maxlen); /* first copy */ count -= maxlen; /* remaining bytes */ if (count) { dst = bdata; /* start of buffer */ src += maxlen; /* new position */ memcpy(dst, src, count); } bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */ bz->f1 = new_f1; /* next frame */ dev_kfree_skb(bch->tx_skb); get_next_bframe(bch); } /* * handle L1 state changes TE */ static void ph_state_te(struct dchannel *dch) { if (dch->debug) printk(KERN_DEBUG "%s: TE newstate %x\n", __func__, dch->state); switch (dch->state) { case 0: l1_event(dch->l1, HW_RESET_IND); break; case 3: l1_event(dch->l1, HW_DEACT_IND); break; case 5: case 8: l1_event(dch->l1, ANYSIGNAL); break; case 6: l1_event(dch->l1, INFO2); break; case 7: l1_event(dch->l1, INFO4_P8); break; } } /* * handle L1 state changes NT */ static void handle_nt_timer3(struct dchannel *dch) { struct hfc_pci *hc = dch->hw; test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); hc->hw.nt_timer = 0; test_and_set_bit(FLG_ACTIVE, &dch->Flags); if (test_bit(HFC_CFG_MASTER, &hc->cfg)) hc->hw.mst_m |= HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); } static void ph_state_nt(struct dchannel *dch) { struct hfc_pci *hc = dch->hw; u_char val; if (dch->debug) printk(KERN_DEBUG "%s: NT newstate %x\n", __func__, dch->state); switch (dch->state) { case 2: if (hc->hw.nt_timer < 0) { hc->hw.nt_timer = 0; test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); /* Clear already pending ints */ val = Read_hfc(hc, HFCPCI_INT_S1); Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE); udelay(10); Write_hfc(hc, HFCPCI_STATES, 4); dch->state = 4; } else if (hc->hw.nt_timer == 0) { hc->hw.int_m1 |= HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); hc->hw.nt_timer = NT_T1_COUNT; hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER; hc->hw.ctmt |= 
HFCPCI_TIM3_125; Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER); test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags); /* allow G2 -> G3 transition */ Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); } else { Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3); } break; case 1: hc->hw.nt_timer = 0; test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); test_and_clear_bit(FLG_ACTIVE, &dch->Flags); hc->hw.mst_m &= ~HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); break; case 4: hc->hw.nt_timer = 0; test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags); test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); break; case 3: if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) { if (!test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags)) { handle_nt_timer3(dch); break; } test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags); hc->hw.int_m1 |= HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); hc->hw.nt_timer = NT_T3_COUNT; hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER; hc->hw.ctmt |= HFCPCI_TIM3_125; Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER); } break; } } static void ph_state(struct dchannel *dch) { struct hfc_pci *hc = dch->hw; if (hc->hw.protocol == ISDN_P_NT_S0) { if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) && hc->hw.nt_timer < 0) handle_nt_timer3(dch); else ph_state_nt(dch); } else ph_state_te(dch); } /* * Layer 1 callback function */ static int hfc_l1callback(struct dchannel *dch, u_int cmd) { struct hfc_pci *hc = dch->hw; switch (cmd) { case INFO3_P8: case INFO3_P10: if (test_bit(HFC_CFG_MASTER, &hc->cfg)) hc->hw.mst_m |= HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); break; case HW_RESET_REQ: Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3); /* HFC ST 3 */ udelay(6); Write_hfc(hc, HFCPCI_STATES, 3); /* HFC ST 2 */ if (test_bit(HFC_CFG_MASTER, &hc->cfg)) hc->hw.mst_m |= HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION); l1_event(dch->l1, HW_POWERUP_IND); break; case HW_DEACT_REQ: hc->hw.mst_m &= ~HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); skb_queue_purge(&dch->squeue); if (dch->tx_skb) { dev_kfree_skb(dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { dev_kfree_skb(dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) del_timer(&dch->timer); break; case HW_POWERUP_REQ: Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION); break; case PH_ACTIVATE_IND: test_and_set_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); break; case PH_DEACTIVATE_IND: test_and_clear_bit(FLG_ACTIVE, &dch->Flags); _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); break; default: if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: unknown command %x\n", __func__, cmd); return -1; } return 0; } /* * Interrupt handler */ static inline void tx_birq(struct bchannel *bch) { if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) hfcpci_fill_fifo(bch); else { if (bch->tx_skb) dev_kfree_skb(bch->tx_skb); if (get_next_bframe(bch)) hfcpci_fill_fifo(bch); } } static 
inline void tx_dirq(struct dchannel *dch) { if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len) hfcpci_fill_dfifo(dch->hw); else { if (dch->tx_skb) dev_kfree_skb(dch->tx_skb); if (get_next_dframe(dch)) hfcpci_fill_dfifo(dch->hw); } } static irqreturn_t hfcpci_int(int intno, void *dev_id) { struct hfc_pci *hc = dev_id; u_char exval; struct bchannel *bch; u_char val, stat; spin_lock(&hc->lock); if (!(hc->hw.int_m2 & 0x08)) { spin_unlock(&hc->lock); return IRQ_NONE; /* not initialised */ } stat = Read_hfc(hc, HFCPCI_STATUS); if (HFCPCI_ANYINT & stat) { val = Read_hfc(hc, HFCPCI_INT_S1); if (hc->dch.debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val); } else { /* shared */ spin_unlock(&hc->lock); return IRQ_NONE; } hc->irqcnt++; if (hc->dch.debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "HFC-PCI irq %x\n", val); val &= hc->hw.int_m1; if (val & 0x40) { /* state machine irq */ exval = Read_hfc(hc, HFCPCI_STATES) & 0xf; if (hc->dch.debug & DEBUG_HW_DCHANNEL) printk(KERN_DEBUG "ph_state chg %d->%d\n", hc->dch.state, exval); hc->dch.state = exval; schedule_event(&hc->dch, FLG_PHCHANGE); val &= ~0x40; } if (val & 0x80) { /* timer irq */ if (hc->hw.protocol == ISDN_P_NT_S0) { if ((--hc->hw.nt_timer) < 0) schedule_event(&hc->dch, FLG_PHCHANGE); } val &= ~0x80; Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER); } if (val & 0x08) { /* B1 rx */ bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); if (bch) main_rec_hfcpci(bch); else if (hc->dch.debug) printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n"); } if (val & 0x10) { /* B2 rx */ bch = Sel_BCS(hc, 2); if (bch) main_rec_hfcpci(bch); else if (hc->dch.debug) printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n"); } if (val & 0x01) { /* B1 tx */ bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); if (bch) tx_birq(bch); else if (hc->dch.debug) printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n"); } if (val & 0x02) { /* B2 tx */ bch = Sel_BCS(hc, 2); if (bch) tx_birq(bch); else if (hc->dch.debug) printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n"); } if (val & 0x20) /* D rx */ receive_dmsg(hc); if (val & 0x04) { /* D tx */ if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags)) del_timer(&hc->dch.timer); tx_dirq(&hc->dch); } spin_unlock(&hc->lock); return IRQ_HANDLED; } /* * timer callback for D-chan busy resolution. 
Currently no function */ static void hfcpci_dbusy_timer(struct hfc_pci *hc) { } /* * activate/deactivate hardware for selected channels and mode */ static int mode_hfcpci(struct bchannel *bch, int bc, int protocol) { struct hfc_pci *hc = bch->hw; int fifo2; u_char rx_slot = 0, tx_slot = 0, pcm_mode; if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n", bch->state, protocol, bch->nr, bc); fifo2 = bc; pcm_mode = (bc>>24) & 0xff; if (pcm_mode) { /* PCM SLOT USE */ if (!test_bit(HFC_CFG_PCM, &hc->cfg)) printk(KERN_WARNING "%s: pcm channel id without HFC_CFG_PCM\n", __func__); rx_slot = (bc>>8) & 0xff; tx_slot = (bc>>16) & 0xff; bc = bc & 0xff; } else if (test_bit(HFC_CFG_PCM, &hc->cfg) && (protocol > ISDN_P_NONE)) printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n", __func__); if (hc->chanlimit > 1) { hc->hw.bswapped = 0; /* B1 and B2 normal mode */ hc->hw.sctrl_e &= ~0x80; } else { if (bc & 2) { if (protocol != ISDN_P_NONE) { hc->hw.bswapped = 1; /* B1 and B2 exchanged */ hc->hw.sctrl_e |= 0x80; } else { hc->hw.bswapped = 0; /* B1 and B2 normal mode */ hc->hw.sctrl_e &= ~0x80; } fifo2 = 1; } else { hc->hw.bswapped = 0; /* B1 and B2 normal mode */ hc->hw.sctrl_e &= ~0x80; } } switch (protocol) { case (-1): /* used for init */ bch->state = -1; bch->nr = bc; case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; if (bc & 2) { hc->hw.sctrl &= ~SCTRL_B2_ENA; hc->hw.sctrl_r &= ~SCTRL_B2_ENA; } else { hc->hw.sctrl &= ~SCTRL_B1_ENA; hc->hw.sctrl_r &= ~SCTRL_B1_ENA; } if (fifo2 & 2) { hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2; hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); } else { hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1; hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); } #ifdef REVERSE_BITORDER if (bch->nr & 2) hc->hw.cirm &= 0x7f; else hc->hw.cirm &= 0xbf; #endif bch->state = ISDN_P_NONE; bch->nr = bc; test_and_clear_bit(FLG_HDLC, &bch->Flags); test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags); break; case (ISDN_P_B_RAW): bch->state = protocol; bch->nr = bc; hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0); hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 1 : 0); if (bc & 2) { hc->hw.sctrl |= SCTRL_B2_ENA; hc->hw.sctrl_r |= SCTRL_B2_ENA; #ifdef REVERSE_BITORDER hc->hw.cirm |= 0x80; #endif } else { hc->hw.sctrl |= SCTRL_B1_ENA; hc->hw.sctrl_r |= SCTRL_B1_ENA; #ifdef REVERSE_BITORDER hc->hw.cirm |= 0x40; #endif } if (fifo2 & 2) { hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; if (!tics) hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); hc->hw.ctmt |= 2; hc->hw.conn &= ~0x18; } else { hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; if (!tics) hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); hc->hw.ctmt |= 1; hc->hw.conn &= ~0x03; } test_and_set_bit(FLG_TRANSPARENT, &bch->Flags); break; case (ISDN_P_B_HDLC): bch->state = protocol; bch->nr = bc; hfcpci_clear_fifo_rx(hc, (fifo2 & 2) ? 1 : 0); hfcpci_clear_fifo_tx(hc, (fifo2 & 2) ? 
1 : 0); if (bc & 2) { hc->hw.sctrl |= SCTRL_B2_ENA; hc->hw.sctrl_r |= SCTRL_B2_ENA; } else { hc->hw.sctrl |= SCTRL_B1_ENA; hc->hw.sctrl_r |= SCTRL_B1_ENA; } if (fifo2 & 2) { hc->hw.last_bfifo_cnt[1] = 0; hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); hc->hw.ctmt &= ~2; hc->hw.conn &= ~0x18; } else { hc->hw.last_bfifo_cnt[0] = 0; hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); hc->hw.ctmt &= ~1; hc->hw.conn &= ~0x03; } test_and_set_bit(FLG_HDLC, &bch->Flags); break; default: printk(KERN_DEBUG "prot not known %x\n", protocol); return -ENOPROTOOPT; } if (test_bit(HFC_CFG_PCM, &hc->cfg)) { if ((protocol == ISDN_P_NONE) || (protocol == -1)) { /* init case */ rx_slot = 0; tx_slot = 0; } else { if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) { rx_slot |= 0xC0; tx_slot |= 0xC0; } else { rx_slot |= 0x80; tx_slot |= 0x80; } } if (bc & 2) { hc->hw.conn &= 0xc7; hc->hw.conn |= 0x08; printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n", __func__, tx_slot); printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n", __func__, rx_slot); Write_hfc(hc, HFCPCI_B2_SSL, tx_slot); Write_hfc(hc, HFCPCI_B2_RSL, rx_slot); } else { hc->hw.conn &= 0xf8; hc->hw.conn |= 0x01; printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n", __func__, tx_slot); printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n", __func__, rx_slot); Write_hfc(hc, HFCPCI_B1_SSL, tx_slot); Write_hfc(hc, HFCPCI_B1_RSL, rx_slot); } } Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e); Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl); Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r); Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt); Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); #ifdef REVERSE_BITORDER Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm); #endif return 0; } static int set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan) { struct hfc_pci *hc = bch->hw; if (bch->debug & DEBUG_HW_BCHANNEL) printk(KERN_DEBUG "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n", bch->state, protocol, bch->nr, chan); if (bch->nr != chan) { printk(KERN_DEBUG "HFCPCI rxtest wrong channel parameter %x/%x\n", bch->nr, chan); return -EINVAL; } switch (protocol) { case (ISDN_P_B_RAW): bch->state = protocol; hfcpci_clear_fifo_rx(hc, (chan & 2) ? 1 : 0); if (chan & 2) { hc->hw.sctrl_r |= SCTRL_B2_ENA; hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX; if (!tics) hc->hw.int_m1 |= HFCPCI_INTS_B2REC; hc->hw.ctmt |= 2; hc->hw.conn &= ~0x18; #ifdef REVERSE_BITORDER hc->hw.cirm |= 0x80; #endif } else { hc->hw.sctrl_r |= SCTRL_B1_ENA; hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX; if (!tics) hc->hw.int_m1 |= HFCPCI_INTS_B1REC; hc->hw.ctmt |= 1; hc->hw.conn &= ~0x03; #ifdef REVERSE_BITORDER hc->hw.cirm |= 0x40; #endif } break; case (ISDN_P_B_HDLC): bch->state = protocol; hfcpci_clear_fifo_rx(hc, (chan & 2) ? 
1 : 0); if (chan & 2) { hc->hw.sctrl_r |= SCTRL_B2_ENA; hc->hw.last_bfifo_cnt[1] = 0; hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX; hc->hw.int_m1 |= HFCPCI_INTS_B2REC; hc->hw.ctmt &= ~2; hc->hw.conn &= ~0x18; } else { hc->hw.sctrl_r |= SCTRL_B1_ENA; hc->hw.last_bfifo_cnt[0] = 0; hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX; hc->hw.int_m1 |= HFCPCI_INTS_B1REC; hc->hw.ctmt &= ~1; hc->hw.conn &= ~0x03; } break; default: printk(KERN_DEBUG "prot not known %x\n", protocol); return -ENOPROTOOPT; } Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r); Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt); Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); #ifdef REVERSE_BITORDER Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm); #endif return 0; } static void deactivate_bchannel(struct bchannel *bch) { struct hfc_pci *hc = bch->hw; u_long flags; spin_lock_irqsave(&hc->lock, flags); mISDN_clear_bchannel(bch); mode_hfcpci(bch, bch->nr, ISDN_P_NONE); spin_unlock_irqrestore(&hc->lock, flags); } /* * Layer 1 B-channel hardware access */ static int channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) { int ret = 0; switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_FILL_EMPTY; break; case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " "off=%d)\n", __func__, bch->nr, !!cq->p1); break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); ret = -EINVAL; break; } return ret; } static int hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct bchannel *bch = container_of(ch, struct bchannel, ch); struct hfc_pci *hc = bch->hw; int ret = -EINVAL; u_long flags; if (bch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case HW_TESTRX_RAW: spin_lock_irqsave(&hc->lock, flags); ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg); spin_unlock_irqrestore(&hc->lock, flags); break; case HW_TESTRX_HDLC: spin_lock_irqsave(&hc->lock, flags); ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg); spin_unlock_irqrestore(&hc->lock, flags); break; case HW_TESTRX_OFF: spin_lock_irqsave(&hc->lock, flags); mode_hfcpci(bch, bch->nr, ISDN_P_NONE); spin_unlock_irqrestore(&hc->lock, flags); ret = 0; break; case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); if (test_bit(FLG_ACTIVE, &bch->Flags)) deactivate_bchannel(bch); ch->protocol = ISDN_P_NONE; ch->peer = NULL; module_put(THIS_MODULE); ret = 0; break; case CONTROL_CHANNEL: ret = channel_bctrl(bch, arg); break; default: printk(KERN_WARNING "%s: unknown prim(%x)\n", __func__, cmd); } return ret; } /* * Layer2 -> Layer 1 Dchannel data */ static int hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct hfc_pci *hc = dch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); unsigned int id; u_long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&hc->lock, flags); ret = dchannel_senddata(dch, skb); if (ret > 0) { /* direct TX */ id = hh->id; /* skb can be freed */ hfcpci_fill_dfifo(dch->hw); ret = 0; spin_unlock_irqrestore(&hc->lock, flags); queue_ch_frame(ch, PH_DATA_CNF, id, NULL); } else spin_unlock_irqrestore(&hc->lock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(&hc->lock, flags); if (hc->hw.protocol == ISDN_P_NT_S0) { 
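/*
 * NT mode: the S/T state machine handles activation itself. If the
 * line is already active, just confirm to L2; otherwise remember the
 * pending L2 activation and start the transition by writing
 * ACTIVATE|DO_ACTION to the STATES register below.
 */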
ret = 0; if (test_bit(HFC_CFG_MASTER, &hc->cfg)) hc->hw.mst_m |= HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); if (test_bit(FLG_ACTIVE, &dch->Flags)) { spin_unlock_irqrestore(&hc->lock, flags); _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_ATOMIC); break; } test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags); Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE | HFCPCI_DO_ACTION | 1); } else ret = l1_event(dch->l1, hh->prim); spin_unlock_irqrestore(&hc->lock, flags); break; case PH_DEACTIVATE_REQ: test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags); spin_lock_irqsave(&hc->lock, flags); if (hc->hw.protocol == ISDN_P_NT_S0) { /* prepare deactivation */ Write_hfc(hc, HFCPCI_STATES, 0x40); skb_queue_purge(&dch->squeue); if (dch->tx_skb) { dev_kfree_skb(dch->tx_skb); dch->tx_skb = NULL; } dch->tx_idx = 0; if (dch->rx_skb) { dev_kfree_skb(dch->rx_skb); dch->rx_skb = NULL; } test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags)) del_timer(&dch->timer); #ifdef FIXME if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags)) dchannel_sched_event(&hc->dch, D_CLEARBUSY); #endif hc->hw.mst_m &= ~HFCPCI_MASTER; Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); ret = 0; } else { ret = l1_event(dch->l1, hh->prim); } spin_unlock_irqrestore(&hc->lock, flags); break; } if (!ret) dev_kfree_skb(skb); return ret; } /* * Layer2 -> Layer 1 Bchannel data */ static int hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb) { struct bchannel *bch = container_of(ch, struct bchannel, ch); struct hfc_pci *hc = bch->hw; int ret = -EINVAL; struct mISDNhead *hh = mISDN_HEAD_P(skb); unsigned int id; u_long flags; switch (hh->prim) { case PH_DATA_REQ: spin_lock_irqsave(&hc->lock, flags); ret = bchannel_senddata(bch, skb); if (ret > 0) { /* direct TX */ id = hh->id; /* skb can be freed */ hfcpci_fill_fifo(bch); ret = 0; spin_unlock_irqrestore(&hc->lock, flags); if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) queue_ch_frame(ch, PH_DATA_CNF, id, NULL); } else spin_unlock_irqrestore(&hc->lock, flags); return ret; case PH_ACTIVATE_REQ: spin_lock_irqsave(&hc->lock, flags); if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) ret = mode_hfcpci(bch, bch->nr, ch->protocol); else ret = 0; spin_unlock_irqrestore(&hc->lock, flags); if (!ret) _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); break; case PH_DEACTIVATE_REQ: deactivate_bchannel(bch); _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); ret = 0; break; } if (!ret) dev_kfree_skb(skb); return ret; } /* * called for card init message */ static void inithfcpci(struct hfc_pci *hc) { printk(KERN_DEBUG "inithfcpci: entered\n"); hc->dch.timer.function = (void *) hfcpci_dbusy_timer; hc->dch.timer.data = (long) &hc->dch; init_timer(&hc->dch.timer); hc->chanlimit = 2; mode_hfcpci(&hc->bch[0], 1, -1); mode_hfcpci(&hc->bch[1], 2, -1); } static int init_card(struct hfc_pci *hc) { int cnt = 3; u_long flags; printk(KERN_DEBUG "init_card: entered\n"); spin_lock_irqsave(&hc->lock, flags); disable_hwirq(hc); spin_unlock_irqrestore(&hc->lock, flags); if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) { printk(KERN_WARNING "mISDN: couldn't get interrupt %d\n", hc->irq); return -EIO; } spin_lock_irqsave(&hc->lock, flags); reset_hfcpci(hc); while (cnt) { inithfcpci(hc); /* * Finally enable IRQ output * this is only allowed if an IRQ routine is already * established for this HFC, so don't do that earlier */ enable_hwirq(hc); spin_unlock_irqrestore(&hc->lock, flags); /* Timeout 80ms */
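/*
 * Sleep roughly 80ms with interrupts enabled; hc->irqcnt is checked
 * afterwards to verify that the card actually delivers interrupts.
 */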
current->state = TASK_UNINTERRUPTIBLE; schedule_timeout((80*HZ)/1000); printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", hc->irq, hc->irqcnt); /* now switch timer interrupt off */ spin_lock_irqsave(&hc->lock, flags); hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); /* reinit mode reg */ Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m); if (!hc->irqcnt) { printk(KERN_WARNING "HFC PCI: IRQ(%d) getting no interrupts " "during init %d\n", hc->irq, 4 - cnt); if (cnt == 1) break; else { reset_hfcpci(hc); cnt--; } } else { spin_unlock_irqrestore(&hc->lock, flags); hc->initdone = 1; return 0; } } disable_hwirq(hc); spin_unlock_irqrestore(&hc->lock, flags); free_irq(hc->irq, hc); return -EIO; } static int channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq) { int ret = 0; u_char slot; switch (cq->op) { case MISDN_CTRL_GETOP: cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | MISDN_CTRL_DISCONNECT; break; case MISDN_CTRL_LOOP: /* channel 0 disabled loop */ if (cq->channel < 0 || cq->channel > 2) { ret = -EINVAL; break; } if (cq->channel & 1) { if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) slot = 0xC0; else slot = 0x80; printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n", __func__, slot); Write_hfc(hc, HFCPCI_B1_SSL, slot); Write_hfc(hc, HFCPCI_B1_RSL, slot); hc->hw.conn = (hc->hw.conn & ~7) | 6; Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); } if (cq->channel & 2) { if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) slot = 0xC1; else slot = 0x81; printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n", __func__, slot); Write_hfc(hc, HFCPCI_B2_SSL, slot); Write_hfc(hc, HFCPCI_B2_RSL, slot); hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30; Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); } if (cq->channel & 3) hc->hw.trm |= 0x80; /* enable IOM-loop */ else { hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09; Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); hc->hw.trm &= 0x7f; /* disable IOM-loop */ } Write_hfc(hc, HFCPCI_TRM, hc->hw.trm); break; case MISDN_CTRL_CONNECT: if (cq->channel == cq->p1) { ret = -EINVAL; break; } if (cq->channel < 1 || cq->channel > 2 || cq->p1 < 1 || cq->p1 > 2) { ret = -EINVAL; break; } if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) slot = 0xC0; else slot = 0x80; printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n", __func__, slot); Write_hfc(hc, HFCPCI_B1_SSL, slot); Write_hfc(hc, HFCPCI_B2_RSL, slot); if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) slot = 0xC1; else slot = 0x81; printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n", __func__, slot); Write_hfc(hc, HFCPCI_B2_SSL, slot); Write_hfc(hc, HFCPCI_B1_RSL, slot); hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36; Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); hc->hw.trm |= 0x80; Write_hfc(hc, HFCPCI_TRM, hc->hw.trm); break; case MISDN_CTRL_DISCONNECT: hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09; Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); hc->hw.trm &= 0x7f; /* disable IOM-loop */ break; default: printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); ret = -EINVAL; break; } return ret; } static int open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch, struct channel_req *rq) { int err = 0; if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__, hc->dch.dev.id, __builtin_return_address(0)); if (rq->protocol == ISDN_P_NONE) return -EINVAL; if (rq->adr.channel == 1) { /* TODO: E-Channel */ return -EINVAL; } if (!hc->initdone) { if (rq->protocol == ISDN_P_TE_S0) { err = create_l1(&hc->dch, hfc_l1callback); if (err) return err; } hc->hw.protocol = rq->protocol; ch->protocol = rq->protocol; err = init_card(hc); 
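/* first open: init_card() requested the IRQ, reset the chip and ran
 * the interrupt self-test; propagate any failure to the caller */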
if (err) return err; } else { if (rq->protocol != ch->protocol) { if (hc->hw.protocol == ISDN_P_TE_S0) l1_event(hc->dch.l1, CLOSE_CHANNEL); if (rq->protocol == ISDN_P_TE_S0) { err = create_l1(&hc->dch, hfc_l1callback); if (err) return err; } hc->hw.protocol = rq->protocol; ch->protocol = rq->protocol; hfcpci_setmode(hc); } } if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) || ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) { _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, GFP_KERNEL); } rq->ch = ch; if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); return 0; } static int open_bchannel(struct hfc_pci *hc, struct channel_req *rq) { struct bchannel *bch; if (rq->adr.channel > 2) return -EINVAL; if (rq->protocol == ISDN_P_NONE) return -EINVAL; bch = &hc->bch[rq->adr.channel - 1]; if (test_and_set_bit(FLG_OPEN, &bch->Flags)) return -EBUSY; /* b-channel can be only open once */ test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); bch->ch.protocol = rq->protocol; rq->ch = &bch->ch; /* TODO: E-channel */ if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); return 0; } /* * device control function */ static int hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); struct dchannel *dch = container_of(dev, struct dchannel, dev); struct hfc_pci *hc = dch->hw; struct channel_req *rq; int err = 0; if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); switch (cmd) { case OPEN_CHANNEL: rq = arg; if ((rq->protocol == ISDN_P_TE_S0) || (rq->protocol == ISDN_P_NT_S0)) err = open_dchannel(hc, ch, rq); else err = open_bchannel(hc, rq); break; case CLOSE_CHANNEL: if (debug & DEBUG_HW_OPEN) printk(KERN_DEBUG "%s: dev(%d) close from %p\n", __func__, hc->dch.dev.id, __builtin_return_address(0)); module_put(THIS_MODULE); break; case CONTROL_CHANNEL: err = channel_ctrl(hc, arg); break; default: if (dch->debug & DEBUG_HW) printk(KERN_DEBUG "%s: unknown command %x\n", __func__, cmd); return -EINVAL; } return err; } static int setup_hw(struct hfc_pci *hc) { void *buffer; printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision); hc->hw.cirm = 0; hc->dch.state = 0; pci_set_master(hc->pdev); if (!hc->irq) { printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n"); return 1; } hc->hw.pci_io = (char __iomem *)(unsigned long)hc->pdev->resource[1].start; if (!hc->hw.pci_io) { printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n"); return 1; } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ pci_set_dma_mask(hc->pdev, 0xFFFF8000); buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle); /* We silently assume the address is okay if nonzero */ if (!buffer) { printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n"); return 1; } hc->hw.fifos = buffer; pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle); hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256); printk(KERN_INFO "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n", (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos, (u_long) hc->hw.dmahandle, hc->irq, HZ); /* enable memory mapped ports, disable busmaster */ pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO); hc->hw.int_m2 = 0; disable_hwirq(hc); hc->hw.int_m1 = 0; Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1); /* At this point the needed PCI config is done */ /* fifos are still not enabled */ hc->hw.timer.function 
= (void *) hfcpci_Timer; hc->hw.timer.data = (long) hc; init_timer(&hc->hw.timer); /* default PCM master */ test_and_set_bit(HFC_CFG_MASTER, &hc->cfg); return 0; } static void release_card(struct hfc_pci *hc) { u_long flags; spin_lock_irqsave(&hc->lock, flags); hc->hw.int_m2 = 0; /* interrupt output off ! */ disable_hwirq(hc); mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE); mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE); if (hc->dch.timer.function != NULL) { del_timer(&hc->dch.timer); hc->dch.timer.function = NULL; } spin_unlock_irqrestore(&hc->lock, flags); if (hc->hw.protocol == ISDN_P_TE_S0) l1_event(hc->dch.l1, CLOSE_CHANNEL); if (hc->initdone) free_irq(hc->irq, hc); release_io_hfcpci(hc); /* must release after free_irq! */ mISDN_unregister_device(&hc->dch.dev); mISDN_freebchannel(&hc->bch[1]); mISDN_freebchannel(&hc->bch[0]); mISDN_freedchannel(&hc->dch); pci_set_drvdata(hc->pdev, NULL); kfree(hc); } static int setup_card(struct hfc_pci *card) { int err = -EINVAL; u_int i; char name[MISDN_MAX_IDLEN]; card->dch.debug = debug; spin_lock_init(&card->lock); mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state); card->dch.hw = card; card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0); card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) | (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK)); card->dch.dev.D.send = hfcpci_l2l1D; card->dch.dev.D.ctrl = hfc_dctrl; card->dch.dev.nrbchan = 2; for (i = 0; i < 2; i++) { card->bch[i].nr = i + 1; set_channelmap(i + 1, card->dch.dev.channelmap); card->bch[i].debug = debug; mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM); card->bch[i].hw = card; card->bch[i].ch.send = hfcpci_l2l1B; card->bch[i].ch.ctrl = hfc_bctrl; card->bch[i].ch.nr = i + 1; list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels); } err = setup_hw(card); if (err) goto error; snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1); err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name); if (err) goto error; HFC_cnt++; printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt); return 0; error: mISDN_freebchannel(&card->bch[1]); mISDN_freebchannel(&card->bch[0]); mISDN_freedchannel(&card->dch); kfree(card); return err; } /* private data in the PCI devices list */ struct _hfc_map { u_int subtype; u_int flag; char *name; }; static const struct _hfc_map hfc_map[] = { {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"}, {HFC_CCD_B000, 0, "Billion B000"}, {HFC_CCD_B006, 0, "Billion B006"}, {HFC_CCD_B007, 0, "Billion B007"}, {HFC_CCD_B008, 0, "Billion B008"}, {HFC_CCD_B009, 0, "Billion B009"}, {HFC_CCD_B00A, 0, "Billion B00A"}, {HFC_CCD_B00B, 0, "Billion B00B"}, {HFC_CCD_B00C, 0, "Billion B00C"}, {HFC_CCD_B100, 0, "Seyeon B100"}, {HFC_CCD_B700, 0, "Primux II S0 B700"}, {HFC_CCD_B701, 0, "Primux II S0 NT B701"}, {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"}, {HFC_ASUS_0675, 0, "Asuscom/Askey 675"}, {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"}, {HFC_BERKOM_A1T, 0, "German telekom A1T"}, {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"}, {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"}, {HFC_DIGI_DF_M_IOM2_E, 0, "Digi International DataFire Micro V IOM2 (Europe)"}, {HFC_DIGI_DF_M_E, 0, "Digi International DataFire Micro V (Europe)"}, {HFC_DIGI_DF_M_IOM2_A, 0, "Digi International DataFire Micro V IOM2 (North America)"}, {HFC_DIGI_DF_M_A, 0, "Digi International DataFire Micro V (North America)"}, {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"}, {}, }; static struct pci_device_id hfc_ids[] = { {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
(unsigned long) &hfc_map[0]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]}, {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]}, {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]}, {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]}, {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]}, {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]}, {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]}, {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]}, {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]}, {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]}, {}, }; static int __devinit hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err = -ENOMEM; struct hfc_pci *card; struct _hfc_map *m = (struct _hfc_map *)ent->driver_data; card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC); if (!card) { printk(KERN_ERR "No kmem for HFC card\n"); return err; } card->pdev = pdev; card->subtype = m->subtype; err = pci_enable_device(pdev); if (err) { kfree(card); return err; } printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n", m->name, pci_name(pdev)); card->irq = pdev->irq; pci_set_drvdata(pdev, card); err = setup_card(card); if (err) pci_set_drvdata(pdev, NULL); return err; } static void __devexit hfc_remove_pci(struct pci_dev *pdev) { struct hfc_pci *card = pci_get_drvdata(pdev); if (card) release_card(card); else if (debug) printk(KERN_DEBUG "%s: drvdata already removed\n", __func__); } static struct pci_driver hfc_driver = { .name = "hfcpci", .probe = hfc_probe, .remove = __devexit_p(hfc_remove_pci), .id_table = hfc_ids, }; static int _hfcpci_softirq(struct device *dev, void *arg) { struct hfc_pci *hc = dev_get_drvdata(dev); struct bchannel *bch; if (hc == 
NULL) return 0; if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) { spin_lock(&hc->lock); bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */ main_rec_hfcpci(bch); tx_birq(bch); } bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2); if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */ main_rec_hfcpci(bch); tx_birq(bch); } spin_unlock(&hc->lock); } return 0; } static void hfcpci_softirq(void *arg) { (void) driver_for_each_device(&hfc_driver.driver, NULL, arg, _hfcpci_softirq); /* if next event would be in the past ... */ if ((s32)(hfc_jiffies + tics - jiffies) <= 0) hfc_jiffies = jiffies + 1; else hfc_jiffies += tics; hfc_tl.expires = hfc_jiffies; add_timer(&hfc_tl); } static int __init HFC_init(void) { int err; if (!poll) poll = HFCPCI_BTRANS_THRESHOLD; if (poll != HFCPCI_BTRANS_THRESHOLD) { tics = (poll * HZ) / 8000; if (tics < 1) tics = 1; poll = (tics * 8000) / HZ; if (poll > 256 || poll < 8) { printk(KERN_ERR "%s: Wrong poll value %d not in range " "of 8..256.\n", __func__, poll); err = -EINVAL; return err; } } if (poll != HFCPCI_BTRANS_THRESHOLD) { printk(KERN_INFO "%s: Using alternative poll value of %d\n", __func__, poll); hfc_tl.function = (void *)hfcpci_softirq; hfc_tl.data = 0; init_timer(&hfc_tl); hfc_tl.expires = jiffies + tics; hfc_jiffies = hfc_tl.expires; add_timer(&hfc_tl); } else tics = 0; /* indicate the use of controller's timer */ err = pci_register_driver(&hfc_driver); if (err) { if (timer_pending(&hfc_tl)) del_timer(&hfc_tl); } return err; } static void __exit HFC_cleanup(void) { if (timer_pending(&hfc_tl)) del_timer(&hfc_tl); pci_unregister_driver(&hfc_driver); } module_init(HFC_init); module_exit(HFC_cleanup); MODULE_DEVICE_TABLE(pci, hfc_ids);
gpl-2.0
mirror-androidarmv6/android_kernel_huawei_msm7x25
drivers/video/msm/mdp_ppp22.c
521
29587
/* drivers/video/msm/mdp_ppp22.c * * Copyright (C) 2007 QUALCOMM Incorporated * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <asm/io.h> #include <linux/msm_mdp.h> #include "mdp_hw.h" #include "mdp_ppp.h" struct mdp_table_entry { uint32_t reg; uint32_t val; }; enum { MDP_DOWNSCALE_PT2TOPT4, MDP_DOWNSCALE_PT4TOPT6, MDP_DOWNSCALE_PT6TOPT8, MDP_DOWNSCALE_PT8TO1, MDP_DOWNSCALE_MAX, /* not technically in the downscale table list */ MDP_DOWNSCALE_BLUR, }; static int downscale_x_table; static int downscale_y_table; static struct mdp_table_entry mdp_upscale_table[] = { { 0x5fffc, 0x0 }, { 0x50200, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, { 0x50204, 0x7ec003f9 }, { 0x5fffc, 0xfec0001c }, { 0x50208, 0x7d4003f3 }, { 0x5fffc, 0xfe40002b }, { 0x5020c, 0x7b8003ed }, { 0x5fffc, 0xfd80003c }, { 0x50210, 0x794003e8 }, { 0x5fffc, 0xfcc0004d }, { 0x50214, 0x76c003e4 }, { 0x5fffc, 0xfc40005f }, { 0x50218, 0x73c003e0 }, { 0x5fffc, 0xfb800071 }, { 0x5021c, 0x708003de }, { 0x5fffc, 0xfac00085 }, { 0x50220, 0x6d0003db }, { 0x5fffc, 0xfa000098 }, { 0x50224, 0x698003d9 }, { 0x5fffc, 0xf98000ac }, { 0x50228, 0x654003d8 }, { 0x5fffc, 0xf8c000c1 }, { 0x5022c, 0x610003d7 }, { 0x5fffc, 0xf84000d5 }, { 0x50230, 0x5c8003d7 }, { 0x5fffc, 0xf7c000e9 }, { 0x50234, 0x580003d7 }, { 0x5fffc, 0xf74000fd }, { 0x50238, 0x534003d8 }, { 0x5fffc, 0xf6c00112 }, { 0x5023c, 0x4e8003d8 }, { 0x5fffc, 0xf6800126 }, { 0x50240, 0x494003da }, { 0x5fffc, 0xf600013a }, { 0x50244, 0x448003db }, { 0x5fffc, 0xf600014d }, { 0x50248, 0x3f4003dd }, { 0x5fffc, 0xf5c00160 }, { 0x5024c, 0x3a4003df }, { 0x5fffc, 0xf5c00172 }, { 0x50250, 0x354003e1 }, { 0x5fffc, 0xf5c00184 }, { 0x50254, 0x304003e3 }, { 0x5fffc, 0xf6000195 }, { 0x50258, 0x2b0003e6 }, { 0x5fffc, 0xf64001a6 }, { 0x5025c, 0x260003e8 }, { 0x5fffc, 0xf6c001b4 }, { 0x50260, 0x214003eb }, { 0x5fffc, 0xf78001c2 }, { 0x50264, 0x1c4003ee }, { 0x5fffc, 0xf80001cf }, { 0x50268, 0x17c003f1 }, { 0x5fffc, 0xf90001db }, { 0x5026c, 0x134003f3 }, { 0x5fffc, 0xfa0001e5 }, { 0x50270, 0xf0003f6 }, { 0x5fffc, 0xfb4001ee }, { 0x50274, 0xac003f9 }, { 0x5fffc, 0xfcc001f5 }, { 0x50278, 0x70003fb }, { 0x5fffc, 0xfe4001fb }, { 0x5027c, 0x34003fe }, }; static struct mdp_table_entry mdp_downscale_x_table_PT2TOPT4[] = { { 0x5fffc, 0x740008c }, { 0x50280, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50284, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50288, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5028c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50290, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50294, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50298, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5029c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x502a0, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x502a4, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x502a8, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x502ac, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x502b0, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x502b4, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x502b8, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x502bc, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x502c0, 0x2d000049 }, { 0x5fffc, 
0x140000b8 }, { 0x502c4, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x502c8, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x502cc, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x502d0, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x502d4, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x502d8, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x502dc, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x502e0, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x502e4, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x502e8, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x502ec, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x502f0, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x502f4, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x502f8, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x502fc, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_x_table_PT4TOPT6[] = { { 0x5fffc, 0x740008c }, { 0x50280, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50284, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50288, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5028c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50290, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50294, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50298, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5029c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x502a0, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x502a4, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x502a8, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x502ac, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x502b0, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x502b4, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x502b8, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x502bc, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x502c0, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x502c4, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x502c8, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x502cc, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x502d0, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x502d4, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x502d8, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x502dc, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x502e0, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x502e4, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x502e8, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x502ec, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x502f0, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x502f4, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x502f8, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x502fc, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_x_table_PT6TOPT8[] = { { 0x5fffc, 0xfe000070 }, { 0x50280, 0x4bc00068 }, { 0x5fffc, 0xfe000078 }, { 0x50284, 0x4bc00060 }, { 0x5fffc, 0xfe000080 }, { 0x50288, 0x4b800059 }, { 0x5fffc, 0xfe000089 }, { 0x5028c, 0x4b000052 }, { 0x5fffc, 0xfe400091 }, { 0x50290, 0x4a80004b }, { 0x5fffc, 0xfe40009a }, { 0x50294, 0x4a000044 }, { 0x5fffc, 0xfe8000a3 }, { 0x50298, 0x4940003d }, { 0x5fffc, 0xfec000ac }, { 0x5029c, 0x48400037 }, { 0x5fffc, 0xff0000b4 }, { 0x502a0, 0x47800031 }, { 0x5fffc, 0xff8000bd }, { 0x502a4, 0x4640002b }, { 0x5fffc, 0xc5 }, { 0x502a8, 0x45000026 }, { 0x5fffc, 0x8000ce }, { 0x502ac, 0x43800021 }, { 0x5fffc, 0x10000d6 }, { 0x502b0, 0x4240001c }, { 0x5fffc, 0x18000df }, { 0x502b4, 0x40800018 }, { 0x5fffc, 0x24000e6 }, { 0x502b8, 0x3f000014 }, { 0x5fffc, 0x30000ee }, { 0x502bc, 0x3d400010 }, { 0x5fffc, 0x40000f5 }, { 0x502c0, 0x3b80000c }, { 0x5fffc, 0x50000fc }, { 0x502c4, 0x39800009 }, { 0x5fffc, 0x6000102 }, { 0x502c8, 0x37c00006 }, { 0x5fffc, 0x7000109 }, { 0x502cc, 0x35800004 }, { 0x5fffc, 0x840010e }, { 0x502d0, 0x33800002 }, { 0x5fffc, 0x9800114 }, { 0x502d4, 0x31400000 }, { 
0x5fffc, 0xac00119 }, { 0x502d8, 0x2f4003fe }, { 0x5fffc, 0xc40011e }, { 0x502dc, 0x2d0003fc }, { 0x5fffc, 0xdc00121 }, { 0x502e0, 0x2b0003fb }, { 0x5fffc, 0xf400125 }, { 0x502e4, 0x28c003fa }, { 0x5fffc, 0x11000128 }, { 0x502e8, 0x268003f9 }, { 0x5fffc, 0x12c0012a }, { 0x502ec, 0x244003f9 }, { 0x5fffc, 0x1480012c }, { 0x502f0, 0x224003f8 }, { 0x5fffc, 0x1640012e }, { 0x502f4, 0x200003f8 }, { 0x5fffc, 0x1800012f }, { 0x502f8, 0x1e0003f8 }, { 0x5fffc, 0x1a00012f }, { 0x502fc, 0x1c0003f8 }, }; static struct mdp_table_entry mdp_downscale_x_table_PT8TO1[] = { { 0x5fffc, 0x0 }, { 0x50280, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, { 0x50284, 0x7ec003f9 }, { 0x5fffc, 0xfec0001c }, { 0x50288, 0x7d4003f3 }, { 0x5fffc, 0xfe40002b }, { 0x5028c, 0x7b8003ed }, { 0x5fffc, 0xfd80003c }, { 0x50290, 0x794003e8 }, { 0x5fffc, 0xfcc0004d }, { 0x50294, 0x76c003e4 }, { 0x5fffc, 0xfc40005f }, { 0x50298, 0x73c003e0 }, { 0x5fffc, 0xfb800071 }, { 0x5029c, 0x708003de }, { 0x5fffc, 0xfac00085 }, { 0x502a0, 0x6d0003db }, { 0x5fffc, 0xfa000098 }, { 0x502a4, 0x698003d9 }, { 0x5fffc, 0xf98000ac }, { 0x502a8, 0x654003d8 }, { 0x5fffc, 0xf8c000c1 }, { 0x502ac, 0x610003d7 }, { 0x5fffc, 0xf84000d5 }, { 0x502b0, 0x5c8003d7 }, { 0x5fffc, 0xf7c000e9 }, { 0x502b4, 0x580003d7 }, { 0x5fffc, 0xf74000fd }, { 0x502b8, 0x534003d8 }, { 0x5fffc, 0xf6c00112 }, { 0x502bc, 0x4e8003d8 }, { 0x5fffc, 0xf6800126 }, { 0x502c0, 0x494003da }, { 0x5fffc, 0xf600013a }, { 0x502c4, 0x448003db }, { 0x5fffc, 0xf600014d }, { 0x502c8, 0x3f4003dd }, { 0x5fffc, 0xf5c00160 }, { 0x502cc, 0x3a4003df }, { 0x5fffc, 0xf5c00172 }, { 0x502d0, 0x354003e1 }, { 0x5fffc, 0xf5c00184 }, { 0x502d4, 0x304003e3 }, { 0x5fffc, 0xf6000195 }, { 0x502d8, 0x2b0003e6 }, { 0x5fffc, 0xf64001a6 }, { 0x502dc, 0x260003e8 }, { 0x5fffc, 0xf6c001b4 }, { 0x502e0, 0x214003eb }, { 0x5fffc, 0xf78001c2 }, { 0x502e4, 0x1c4003ee }, { 0x5fffc, 0xf80001cf }, { 0x502e8, 0x17c003f1 }, { 0x5fffc, 0xf90001db }, { 0x502ec, 0x134003f3 }, { 0x5fffc, 0xfa0001e5 }, { 0x502f0, 0xf0003f6 }, { 0x5fffc, 0xfb4001ee }, { 0x502f4, 0xac003f9 }, { 0x5fffc, 0xfcc001f5 }, { 0x502f8, 0x70003fb }, { 0x5fffc, 0xfe4001fb }, { 0x502fc, 0x34003fe }, }; struct mdp_table_entry *mdp_downscale_x_table[MDP_DOWNSCALE_MAX] = { [MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_x_table_PT2TOPT4, [MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_x_table_PT4TOPT6, [MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_x_table_PT6TOPT8, [MDP_DOWNSCALE_PT8TO1] = mdp_downscale_x_table_PT8TO1, }; static struct mdp_table_entry mdp_downscale_y_table_PT2TOPT4[] = { { 0x5fffc, 0x740008c }, { 0x50300, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50304, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50308, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5030c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50310, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50314, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50318, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5031c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x50320, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x50324, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x50328, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x5032c, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x50330, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x50334, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x50338, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x5033c, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x50340, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x50344, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x50348, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x5034c, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x50350, 
0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x50354, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x50358, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x5035c, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x50360, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x50364, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x50368, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x5036c, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x50370, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x50374, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x50378, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x5037c, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_y_table_PT4TOPT6[] = { { 0x5fffc, 0x740008c }, { 0x50300, 0x33800088 }, { 0x5fffc, 0x800008e }, { 0x50304, 0x33400084 }, { 0x5fffc, 0x8400092 }, { 0x50308, 0x33000080 }, { 0x5fffc, 0x9000094 }, { 0x5030c, 0x3300007b }, { 0x5fffc, 0x9c00098 }, { 0x50310, 0x32400077 }, { 0x5fffc, 0xa40009b }, { 0x50314, 0x32000073 }, { 0x5fffc, 0xb00009d }, { 0x50318, 0x31c0006f }, { 0x5fffc, 0xbc000a0 }, { 0x5031c, 0x3140006b }, { 0x5fffc, 0xc8000a2 }, { 0x50320, 0x31000067 }, { 0x5fffc, 0xd8000a5 }, { 0x50324, 0x30800062 }, { 0x5fffc, 0xe4000a8 }, { 0x50328, 0x2fc0005f }, { 0x5fffc, 0xec000aa }, { 0x5032c, 0x2fc0005b }, { 0x5fffc, 0xf8000ad }, { 0x50330, 0x2f400057 }, { 0x5fffc, 0x108000b0 }, { 0x50334, 0x2e400054 }, { 0x5fffc, 0x114000b2 }, { 0x50338, 0x2e000050 }, { 0x5fffc, 0x124000b4 }, { 0x5033c, 0x2d80004c }, { 0x5fffc, 0x130000b6 }, { 0x50340, 0x2d000049 }, { 0x5fffc, 0x140000b8 }, { 0x50344, 0x2c800045 }, { 0x5fffc, 0x150000b9 }, { 0x50348, 0x2c000042 }, { 0x5fffc, 0x15c000bd }, { 0x5034c, 0x2b40003e }, { 0x5fffc, 0x16c000bf }, { 0x50350, 0x2a80003b }, { 0x5fffc, 0x17c000bf }, { 0x50354, 0x2a000039 }, { 0x5fffc, 0x188000c2 }, { 0x50358, 0x29400036 }, { 0x5fffc, 0x19c000c4 }, { 0x5035c, 0x28800032 }, { 0x5fffc, 0x1ac000c5 }, { 0x50360, 0x2800002f }, { 0x5fffc, 0x1bc000c7 }, { 0x50364, 0x2740002c }, { 0x5fffc, 0x1cc000c8 }, { 0x50368, 0x26c00029 }, { 0x5fffc, 0x1dc000c9 }, { 0x5036c, 0x26000027 }, { 0x5fffc, 0x1ec000cc }, { 0x50370, 0x25000024 }, { 0x5fffc, 0x200000cc }, { 0x50374, 0x24800021 }, { 0x5fffc, 0x210000cd }, { 0x50378, 0x23800020 }, { 0x5fffc, 0x220000ce }, { 0x5037c, 0x2300001d }, }; static struct mdp_table_entry mdp_downscale_y_table_PT6TOPT8[] = { { 0x5fffc, 0xfe000070 }, { 0x50300, 0x4bc00068 }, { 0x5fffc, 0xfe000078 }, { 0x50304, 0x4bc00060 }, { 0x5fffc, 0xfe000080 }, { 0x50308, 0x4b800059 }, { 0x5fffc, 0xfe000089 }, { 0x5030c, 0x4b000052 }, { 0x5fffc, 0xfe400091 }, { 0x50310, 0x4a80004b }, { 0x5fffc, 0xfe40009a }, { 0x50314, 0x4a000044 }, { 0x5fffc, 0xfe8000a3 }, { 0x50318, 0x4940003d }, { 0x5fffc, 0xfec000ac }, { 0x5031c, 0x48400037 }, { 0x5fffc, 0xff0000b4 }, { 0x50320, 0x47800031 }, { 0x5fffc, 0xff8000bd }, { 0x50324, 0x4640002b }, { 0x5fffc, 0xc5 }, { 0x50328, 0x45000026 }, { 0x5fffc, 0x8000ce }, { 0x5032c, 0x43800021 }, { 0x5fffc, 0x10000d6 }, { 0x50330, 0x4240001c }, { 0x5fffc, 0x18000df }, { 0x50334, 0x40800018 }, { 0x5fffc, 0x24000e6 }, { 0x50338, 0x3f000014 }, { 0x5fffc, 0x30000ee }, { 0x5033c, 0x3d400010 }, { 0x5fffc, 0x40000f5 }, { 0x50340, 0x3b80000c }, { 0x5fffc, 0x50000fc }, { 0x50344, 0x39800009 }, { 0x5fffc, 0x6000102 }, { 0x50348, 0x37c00006 }, { 0x5fffc, 0x7000109 }, { 0x5034c, 0x35800004 }, { 0x5fffc, 0x840010e }, { 0x50350, 0x33800002 }, { 0x5fffc, 0x9800114 }, { 0x50354, 0x31400000 }, { 0x5fffc, 0xac00119 }, { 0x50358, 0x2f4003fe }, { 0x5fffc, 0xc40011e }, { 0x5035c, 0x2d0003fc }, { 0x5fffc, 0xdc00121 }, { 0x50360, 0x2b0003fb }, { 0x5fffc, 0xf400125 }, { 0x50364, 
0x28c003fa }, { 0x5fffc, 0x11000128 }, { 0x50368, 0x268003f9 }, { 0x5fffc, 0x12c0012a }, { 0x5036c, 0x244003f9 }, { 0x5fffc, 0x1480012c }, { 0x50370, 0x224003f8 }, { 0x5fffc, 0x1640012e }, { 0x50374, 0x200003f8 }, { 0x5fffc, 0x1800012f }, { 0x50378, 0x1e0003f8 }, { 0x5fffc, 0x1a00012f }, { 0x5037c, 0x1c0003f8 }, }; static struct mdp_table_entry mdp_downscale_y_table_PT8TO1[] = { { 0x5fffc, 0x0 }, { 0x50300, 0x7fc00000 }, { 0x5fffc, 0xff80000d }, { 0x50304, 0x7ec003f9 }, { 0x5fffc, 0xfec0001c }, { 0x50308, 0x7d4003f3 }, { 0x5fffc, 0xfe40002b }, { 0x5030c, 0x7b8003ed }, { 0x5fffc, 0xfd80003c }, { 0x50310, 0x794003e8 }, { 0x5fffc, 0xfcc0004d }, { 0x50314, 0x76c003e4 }, { 0x5fffc, 0xfc40005f }, { 0x50318, 0x73c003e0 }, { 0x5fffc, 0xfb800071 }, { 0x5031c, 0x708003de }, { 0x5fffc, 0xfac00085 }, { 0x50320, 0x6d0003db }, { 0x5fffc, 0xfa000098 }, { 0x50324, 0x698003d9 }, { 0x5fffc, 0xf98000ac }, { 0x50328, 0x654003d8 }, { 0x5fffc, 0xf8c000c1 }, { 0x5032c, 0x610003d7 }, { 0x5fffc, 0xf84000d5 }, { 0x50330, 0x5c8003d7 }, { 0x5fffc, 0xf7c000e9 }, { 0x50334, 0x580003d7 }, { 0x5fffc, 0xf74000fd }, { 0x50338, 0x534003d8 }, { 0x5fffc, 0xf6c00112 }, { 0x5033c, 0x4e8003d8 }, { 0x5fffc, 0xf6800126 }, { 0x50340, 0x494003da }, { 0x5fffc, 0xf600013a }, { 0x50344, 0x448003db }, { 0x5fffc, 0xf600014d }, { 0x50348, 0x3f4003dd }, { 0x5fffc, 0xf5c00160 }, { 0x5034c, 0x3a4003df }, { 0x5fffc, 0xf5c00172 }, { 0x50350, 0x354003e1 }, { 0x5fffc, 0xf5c00184 }, { 0x50354, 0x304003e3 }, { 0x5fffc, 0xf6000195 }, { 0x50358, 0x2b0003e6 }, { 0x5fffc, 0xf64001a6 }, { 0x5035c, 0x260003e8 }, { 0x5fffc, 0xf6c001b4 }, { 0x50360, 0x214003eb }, { 0x5fffc, 0xf78001c2 }, { 0x50364, 0x1c4003ee }, { 0x5fffc, 0xf80001cf }, { 0x50368, 0x17c003f1 }, { 0x5fffc, 0xf90001db }, { 0x5036c, 0x134003f3 }, { 0x5fffc, 0xfa0001e5 }, { 0x50370, 0xf0003f6 }, { 0x5fffc, 0xfb4001ee }, { 0x50374, 0xac003f9 }, { 0x5fffc, 0xfcc001f5 }, { 0x50378, 0x70003fb }, { 0x5fffc, 0xfe4001fb }, { 0x5037c, 0x34003fe }, }; struct mdp_table_entry *mdp_downscale_y_table[MDP_DOWNSCALE_MAX] = { [MDP_DOWNSCALE_PT2TOPT4] = mdp_downscale_y_table_PT2TOPT4, [MDP_DOWNSCALE_PT4TOPT6] = mdp_downscale_y_table_PT4TOPT6, [MDP_DOWNSCALE_PT6TOPT8] = mdp_downscale_y_table_PT6TOPT8, [MDP_DOWNSCALE_PT8TO1] = mdp_downscale_y_table_PT8TO1, }; struct mdp_table_entry mdp_gaussian_blur_table[] = { /* max variance */ { 0x5fffc, 0x20000080 }, { 0x50280, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50284, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50288, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5028c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50290, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50294, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50298, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5029c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502a0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502a4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502a8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502ac, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502b0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502b4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502b8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502bc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502c0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502c4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502c8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502cc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502d0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502d4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502d8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502dc, 0x20000080 }, { 
0x5fffc, 0x20000080 }, { 0x502e0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502e4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502e8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502ec, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502f0, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502f4, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502f8, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x502fc, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50300, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50304, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50308, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5030c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50310, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50314, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50318, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5031c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50320, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50324, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50328, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5032c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50330, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50334, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50338, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5033c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50340, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50344, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50348, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5034c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50350, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50354, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50358, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5035c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50360, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50364, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50368, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5036c, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50370, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50374, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x50378, 0x20000080 }, { 0x5fffc, 0x20000080 }, { 0x5037c, 0x20000080 }, }; static void load_table(const struct mdp_info *mdp, struct mdp_table_entry *table, int len) { int i; for (i = 0; i < len; i++) mdp_writel(mdp, table[i].val, table[i].reg); } enum { IMG_LEFT, IMG_RIGHT, IMG_TOP, IMG_BOTTOM, }; static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst, uint32_t *interp1, uint32_t *interp2, uint32_t *repeat1, uint32_t *repeat2) { if (src > 3 * dst) { *interp1 = 0; *interp2 = src - 1; *repeat1 = 0; *repeat2 = 0; } else if (src == 3 * dst) { *interp1 = 0; *interp2 = src; *repeat1 = 0; *repeat2 = 1; } else if (src > dst && src < 3 * dst) { *interp1 = -1; *interp2 = src; *repeat1 = 1; *repeat2 = 1; } else if (src == dst) { *interp1 = -1; *interp2 = src + 1; *repeat1 = 1; *repeat2 = 2; } else { *interp1 = -2; *interp2 = src + 1; *repeat1 = 2; *repeat2 = 2; } *interp1 += src_coord; *interp2 += src_coord; } int mdp_ppp_cfg_edge_cond(struct mdp_blit_req *req, struct ppp_regs *regs) { int32_t luma_interp[4]; int32_t luma_repeat[4]; int32_t chroma_interp[4]; int32_t chroma_bound[4]; int32_t chroma_repeat[4]; uint32_t dst_w, dst_h; memset(&luma_interp, 0, sizeof(int32_t) * 4); memset(&luma_repeat, 0, sizeof(int32_t) * 4); memset(&chroma_interp, 0, sizeof(int32_t) * 4); memset(&chroma_bound, 0, sizeof(int32_t) * 4); memset(&chroma_repeat, 0, sizeof(int32_t) * 4); regs->edge = 0; if (req->flags & MDP_ROT_90) { dst_w = req->dst_rect.h; dst_h = req->dst_rect.w; } else { dst_w = req->dst_rect.w; dst_h = req->dst_rect.h; } if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) { 
get_edge_info(req->src_rect.h, req->src_rect.y, dst_h, &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM], &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]); get_edge_info(req->src_rect.w, req->src_rect.x, dst_w, &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT], &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]); } else { luma_interp[IMG_LEFT] = req->src_rect.x; luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; luma_interp[IMG_TOP] = req->src_rect.y; luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; luma_repeat[IMG_LEFT] = 0; luma_repeat[IMG_TOP] = 0; luma_repeat[IMG_RIGHT] = 0; luma_repeat[IMG_BOTTOM] = 0; } chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT]; chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT]; chroma_interp[IMG_TOP] = luma_interp[IMG_TOP]; chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM]; chroma_bound[IMG_LEFT] = req->src_rect.x; chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; chroma_bound[IMG_TOP] = req->src_rect.y; chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; if (IS_YCRCB(req->src.format)) { chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1; chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1; chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1; chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1; } if (req->src.format == MDP_Y_CBCR_H2V2 || req->src.format == MDP_Y_CRCB_H2V2) { chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1; chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1) >> 1; chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1; chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1; } chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] - chroma_interp[IMG_LEFT]; chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] - chroma_bound[IMG_RIGHT]; chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] - chroma_interp[IMG_TOP]; chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] - chroma_bound[IMG_BOTTOM]; if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 || chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 || chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 || chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 || luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 || luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 || luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 || luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3) return -1; regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA; regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA; regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA; regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA; regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA; regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA; regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA; regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA; return 0; } #define ONE_HALF (1LL << 32) #define ONE (1LL << 33) #define TWO (2LL << 33) #define THREE (3LL << 33) #define FRAC_MASK (ONE - 1) #define INT_MASK (~FRAC_MASK) static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin, uint32_t *phase_init, uint32_t *phase_step) { /* to improve precision, calculations are done in U31.33 and converted * to U3.29 at the end */ int64_t k1, k2, k3, k4, tmp; uint64_t n, d, os, os_p, od, od_p, oreq; unsigned rpa = 0; int64_t ip64, delta; if (dim_out % 3 == 0) rpa = !(dim_in % (dim_out / 3)); n = ((uint64_t)dim_out) << 34; d = dim_in;
if (!d) return -1; do_div(n, d); k3 = (n + 1) >> 1; if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) return -1; n = ((uint64_t)dim_in) << 34; d = (uint64_t)dim_out; if (!d) return -1; do_div(n, d); k1 = (n + 1) >> 1; k2 = (k1 - ONE) >> 1; *phase_init = (int)(k2 >> 4); k4 = (k3 - ONE) >> 1; if (rpa) { os = ((uint64_t)origin << 33) - ONE_HALF; tmp = (dim_out * os) + ONE_HALF; if (!dim_in) return -1; do_div(tmp, dim_in); od = tmp - ONE_HALF; } else { os = ((uint64_t)origin << 1) - 1; od = (((k3 * os) >> 1) + k4); } od_p = od & INT_MASK; if (od_p != od) od_p += ONE; if (rpa) { tmp = (dim_in * od_p) + ONE_HALF; if (!dim_in) return -1; do_div(tmp, dim_in); os_p = tmp - ONE_HALF; } else { os_p = ((k1 * (od_p >> 33)) + k2); } oreq = (os_p & INT_MASK) - ONE; ip64 = os_p - oreq; delta = ((int64_t)(origin) << 33) - oreq; ip64 -= delta; /* limit to valid range before the left shift */ delta = (ip64 & (1LL << 63)) ? 4 : -4; delta <<= 33; while (abs((int)(ip64 >> 33)) > 4) ip64 += delta; *phase_init = (int)(ip64 >> 4); *phase_step = (uint32_t)(k1 >> 4); return 0; } int mdp_ppp_cfg_scale(const struct mdp_info *mdp, struct ppp_regs *regs, struct mdp_rect *src_rect, struct mdp_rect *dst_rect, uint32_t src_format, uint32_t dst_format) { int downscale; uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y; uint32_t scale_factor_x, scale_factor_y; if (scale_params(src_rect->w, dst_rect->w, 1, &phase_init_x, &phase_step_x) || scale_params(src_rect->h, dst_rect->h, 1, &phase_init_y, &phase_step_y)) return -1; regs->phasex_init = phase_init_x; regs->phasey_init = phase_init_y; regs->phasex_step = phase_step_x; regs->phasey_step = phase_step_y; scale_factor_x = (dst_rect->w * 10) / src_rect->w; scale_factor_y = (dst_rect->h * 10) / src_rect->h; if (scale_factor_x > 8) downscale = MDP_DOWNSCALE_PT8TO1; else if (scale_factor_x > 6) downscale = MDP_DOWNSCALE_PT6TOPT8; else if (scale_factor_x > 4) downscale = MDP_DOWNSCALE_PT4TOPT6; else downscale = MDP_DOWNSCALE_PT2TOPT4; if (downscale != downscale_x_table) { load_table(mdp, mdp_downscale_x_table[downscale], 64); downscale_x_table = downscale; } if (scale_factor_y > 8) downscale = MDP_DOWNSCALE_PT8TO1; else if (scale_factor_y > 6) downscale = MDP_DOWNSCALE_PT6TOPT8; else if (scale_factor_y > 4) downscale = MDP_DOWNSCALE_PT4TOPT6; else downscale = MDP_DOWNSCALE_PT2TOPT4; if (downscale != downscale_y_table) { load_table(mdp, mdp_downscale_y_table[downscale], 64); downscale_y_table = downscale; } return 0; } int mdp_ppp_load_blur(const struct mdp_info *mdp) { if (!(downscale_x_table == MDP_DOWNSCALE_BLUR && downscale_y_table == MDP_DOWNSCALE_BLUR)) { load_table(mdp, mdp_gaussian_blur_table, 128); downscale_x_table = MDP_DOWNSCALE_BLUR; downscale_y_table = MDP_DOWNSCALE_BLUR; } return 0; } void mdp_ppp_init_scale(const struct mdp_info *mdp) { downscale_x_table = MDP_DOWNSCALE_MAX; downscale_y_table = MDP_DOWNSCALE_MAX; load_table(mdp, mdp_upscale_table, ARRAY_SIZE(mdp_upscale_table)); }
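/*
 * Example (not from the original driver): the scaler phase step handed to
 * the hardware is just the src/dst ratio in fixed point.  scale_params()
 * above does the division in U31.33 ("to improve precision") and then
 * drops four fractional bits, so the 32-bit value ends up in U3.29
 * format.  A hypothetical stand-alone sketch of that conversion; the
 * function name is invented for illustration:
 */
#if 0	/* illustration only, never compiled */
#include <stdint.h>

static uint32_t example_phase_step(uint32_t dim_in, uint32_t dim_out)
{
	/* ratio = dim_in / dim_out with 34 fractional bits;
	 * dim_out must be non-zero (the driver checks this) */
	uint64_t n = ((uint64_t)dim_in << 34) / dim_out;
	/* round down to U31.33, as scale_params() does with (n + 1) >> 1 */
	uint64_t k1 = (n + 1) >> 1;
	/* drop four fractional bits: U31.33 -> U3.29 in 32 bits */
	return (uint32_t)(k1 >> 4);
}
/* e.g. example_phase_step(640, 480) == 0x2aaaaaaa, i.e. ~4/3 in U3.29 */
#endif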
gpl-2.0
louis-langholtz/linux
drivers/gpu/drm/bochs/bochs_hw.c
1289
5375
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include "bochs.h" /* ---------------------------------------------------------------------- */ static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) { if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df)) return; if (bochs->mmio) { int offset = ioport - 0x3c0 + 0x400; writeb(val, bochs->mmio + offset); } else { outb(val, ioport); } } static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg) { u16 ret = 0; if (bochs->mmio) { int offset = 0x500 + (reg << 1); ret = readw(bochs->mmio + offset); } else { outw(reg, VBE_DISPI_IOPORT_INDEX); ret = inw(VBE_DISPI_IOPORT_DATA); } return ret; } static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val) { if (bochs->mmio) { int offset = 0x500 + (reg << 1); writew(val, bochs->mmio + offset); } else { outw(reg, VBE_DISPI_IOPORT_INDEX); outw(val, VBE_DISPI_IOPORT_DATA); } } int bochs_hw_init(struct drm_device *dev, uint32_t flags) { struct bochs_device *bochs = dev->dev_private; struct pci_dev *pdev = dev->pdev; unsigned long addr, size, mem, ioaddr, iosize, qext_size; u16 id; if (pdev->resource[2].flags & IORESOURCE_MEM) { /* mmio bar with vga and bochs registers present */ if (pci_request_region(pdev, 2, "bochs-drm") != 0) { DRM_ERROR("Cannot request mmio region\n"); return -EBUSY; } ioaddr = pci_resource_start(pdev, 2); iosize = pci_resource_len(pdev, 2); bochs->mmio = ioremap(ioaddr, iosize); if (bochs->mmio == NULL) { DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; } } else { ioaddr = VBE_DISPI_IOPORT_INDEX; iosize = 2; if (!request_region(ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request ioports\n"); return -EBUSY; } bochs->ioports = 1; } id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID); mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K) * 64 * 1024; if ((id & 0xfff0) != VBE_DISPI_ID0) { DRM_ERROR("ID mismatch\n"); return -ENODEV; } if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0) return -ENODEV; addr = pci_resource_start(pdev, 0); size = pci_resource_len(pdev, 0); if (addr == 0) return -ENODEV; if (size != mem) { DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n", size, mem); size = min(size, mem); } if (pci_request_region(pdev, 0, "bochs-drm") != 0) { DRM_ERROR("Cannot request framebuffer\n"); return -EBUSY; } bochs->fb_map = ioremap(addr, size); if (bochs->fb_map == NULL) { DRM_ERROR("Cannot map framebuffer\n"); return -ENOMEM; } bochs->fb_base = addr; bochs->fb_size = size; DRM_INFO("Found bochs VGA, ID 0x%x.\n", id); DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n", size / 1024, addr, bochs->ioports ? 
"ioports" : "mmio", ioaddr); if (bochs->mmio && pdev->revision >= 2) { qext_size = readl(bochs->mmio + 0x600); if (qext_size < 4 || qext_size > iosize) goto noext; DRM_DEBUG("Found qemu ext regs, size %ld\n", qext_size); if (qext_size >= 8) { #ifdef __BIG_ENDIAN writel(0xbebebebe, bochs->mmio + 0x604); #else writel(0x1e1e1e1e, bochs->mmio + 0x604); #endif DRM_DEBUG(" qext endian: 0x%x\n", readl(bochs->mmio + 0x604)); } } noext: return 0; } void bochs_hw_fini(struct drm_device *dev) { struct bochs_device *bochs = dev->dev_private; if (bochs->mmio) iounmap(bochs->mmio); if (bochs->ioports) release_region(VBE_DISPI_IOPORT_INDEX, 2); if (bochs->fb_map) iounmap(bochs->fb_map); pci_release_regions(dev->pdev); } void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode *mode) { bochs->xres = mode->hdisplay; bochs->yres = mode->vdisplay; bochs->bpp = 32; bochs->stride = mode->hdisplay * (bochs->bpp / 8); bochs->yres_virtual = bochs->fb_size / bochs->stride; DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n", bochs->xres, bochs->yres, bochs->bpp, bochs->yres_virtual); bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */ bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0); bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp); bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres); bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres); bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0); bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres); bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT, bochs->yres_virtual); bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0); bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0); bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED); } void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, u64 addr) { unsigned long offset = (unsigned long)addr + y * bochs->stride + x * (bochs->bpp / 8); int vy = offset / bochs->stride; int vx = (offset % bochs->stride) * 8 / bochs->bpp; DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n", x, y, addr, offset, vx, vy); bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx); bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy); }
gpl-2.0
stefanstrogin/linux
drivers/regulator/dummy.c
1545
2228
/* * dummy.c * * Copyright 2010 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This is useful for systems with mixed controllable and * non-controllable regulators, as well as for allowing testing on * systems with no controllable regulators. */ #include <linux/err.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include "dummy.h" struct regulator_dev *dummy_regulator_rdev; static struct regulator_init_data dummy_initdata = { .constraints = { .always_on = 1, }, }; static struct regulator_ops dummy_ops; static const struct regulator_desc dummy_desc = { .name = "regulator-dummy", .id = -1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .ops = &dummy_ops, }; static int dummy_regulator_probe(struct platform_device *pdev) { struct regulator_config config = { }; int ret; config.dev = &pdev->dev; config.init_data = &dummy_initdata; dummy_regulator_rdev = regulator_register(&dummy_desc, &config); if (IS_ERR(dummy_regulator_rdev)) { ret = PTR_ERR(dummy_regulator_rdev); pr_err("Failed to register regulator: %d\n", ret); return ret; } return 0; } static struct platform_driver dummy_regulator_driver = { .probe = dummy_regulator_probe, .driver = { .name = "reg-dummy", }, }; static struct platform_device *dummy_pdev; void __init regulator_dummy_init(void) { int ret; dummy_pdev = platform_device_alloc("reg-dummy", -1); if (!dummy_pdev) { pr_err("Failed to allocate dummy regulator device\n"); return; } ret = platform_device_add(dummy_pdev); if (ret != 0) { pr_err("Failed to register dummy regulator device: %d\n", ret); platform_device_put(dummy_pdev); return; } ret = platform_driver_register(&dummy_regulator_driver); if (ret != 0) { pr_err("Failed to register dummy regulator driver: %d\n", ret); platform_device_unregister(dummy_pdev); } }
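/*
 * Example (not from the original driver): any fixed, non-controllable
 * rail can be described the same way -- init data whose only constraint
 * is always_on, attached to a desc whose ops expose no operations at all.
 * A hypothetical second descriptor following the pattern above (names
 * invented for illustration):
 */
#if 0	/* illustration only, never compiled */
static struct regulator_init_data example_initdata = {
	.constraints = {
		.always_on = 1,		/* rail can never be switched off */
	},
};

static const struct regulator_desc example_desc = {
	.name = "example-fixed",	/* hypothetical name */
	.id = -1,
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
	.ops = &dummy_ops,		/* empty ops: nothing is controllable */
};
#endif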
gpl-2.0
project-voodoo-vibrant/linux_sgh-t959v
sound/pci/au88x0/au88x0_core.c
1545
78891
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Vortex core low level functions. Author: Manuel Jander (mjander@users.sourceforge.cl) These functions are mainly the result of translations made from the original disassembly of the au88x0 binary drivers, written by Aureal before they went down. Many thanks to Jeff Muizelaar, Kester Maddock, and whoever contributed to the OpenVortex project. The author of this file put the few available pieces together and translated the rest of the riddle (Mix, Src and connection stuff). Some things are still to be discovered, and their meanings are unclear. Some of these functions aren't really intended to be used; rather, they help in understanding how the AU88X0 chips work. Keep them in, because they could be used somewhere in the future. This code hasn't been tested or proofread thoroughly. If you want to help, take a look at the AU88X0 assembly and check if this matches. Functions tested OK so far are (they show the desired effect at least): vortex_routes(); (1 bug fixed). vortex_adb_addroute(); vortex_adb_addroutes(); vortex_connect_codecplay(); vortex_src_flushbuffers(); vortex_adbdma_setmode(); note: still some unknown arguments! vortex_adbdma_startfifo(); vortex_adbdma_stopfifo(); vortex_fifo_setadbctrl(); note: still some unknown arguments! vortex_mix_setinputvolumebyte(); vortex_mix_enableinput(); vortex_mixer_addWTD(); (fixed) vortex_connection_adbdma_src_src(); vortex_connection_adbdma_src(); vortex_src_change_convratio(); vortex_src_addWTD(); (fixed) History: 01-03-2003 First revision. 01-21-2003 Some bug fixes. 17-02-2003 Many bug fixes after a big versioning mess. 18-02-2003 JAAAAAHHHUUUUUU!!!! The mixer works!! I'm just so happy! (2 hours later...) I can't believe it! I'm really lucky today. Now the SRC is working too! Yeah! XMMS works! 20-02-2003 First steps into the ALSA world. 28-02-2003 As my birthday present, I discovered how the DMA buffer pages really work :-). It was all wrong. 12-03-2003 ALSA driver starts working (2 channels). 16-03-2003 More srcblock_setupchannel discoveries. 12-04-2003 AU8830 playback support. Recording in the works. 17-04-2003 vortex_route() and vortex_routes() bug fixes. AU8830 recording works now, but the Chip'n'Dale effect is still there. 16-05-2003 SrcSetupChannel cleanup. Moved the Src setup stuff entirely into au88x0_pcm.c. 06-06-2003 Buffer shifter bug fix. Mixer volume fix. 07-12-2003 A3D routing finally fixed. Believed to be OK. 25-03-2004 Many thanks to Claudia for such valuable bug reports. */ #include "au88x0.h" #include "au88x0_a3d.h" #include <linux/delay.h> /* MIXER (CAsp4Mix.s and CAsp4Mixer.s) */ // FIXME: get rid of this.
static int mchannels[NR_MIXIN]; static int rampchs[NR_MIXIN]; static void vortex_mixer_en_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_MIXER_SR, hwread(vortex->mmio, VORTEX_MIXER_SR) | (0x1 << channel)); } static void vortex_mixer_dis_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_MIXER_SR, hwread(vortex->mmio, VORTEX_MIXER_SR) & ~(0x1 << channel)); } #if 0 static void vortex_mix_muteinputgain(vortex_t * vortex, unsigned char mix, unsigned char channel) { hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + ((mix << 5) + channel), 0x80); hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + ((mix << 5) + channel), 0x80); } static int vortex_mix_getvolume(vortex_t * vortex, unsigned char mix) { int a; a = hwread(vortex->mmio, VORTEX_MIX_VOL_A + (mix << 2)) & 0xff; //FP2LinearFrac(a); return (a); } static int vortex_mix_getinputvolume(vortex_t * vortex, unsigned char mix, int channel, int *vol) { int a; if (!(mchannels[mix] & (1 << channel))) return 0; a = hwread(vortex->mmio, VORTEX_MIX_INVOL_A + (((mix << 5) + channel) << 2)); /* if (rampchs[mix] == 0) a = FP2LinearFrac(a); else a = FP2LinearFracWT(a); */ *vol = a; return (0); } static unsigned int vortex_mix_boost6db(unsigned char vol) { return (vol + 8); /* WOW! what a complex function! */ } static void vortex_mix_rampvolume(vortex_t * vortex, int mix) { int ch; char a; // This function is intended for ramping down only (see vortex_disableinput()). for (ch = 0; ch < 0x20; ch++) { if (((1 << ch) & rampchs[mix]) == 0) continue; a = hwread(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + ch) << 2)); if (a > -126) { a -= 2; hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + (((mix << 5) + ch) << 2), a); hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + ch) << 2), a); } else vortex_mix_killinput(vortex, mix, ch); } } static int vortex_mix_getenablebit(vortex_t * vortex, unsigned char mix, int mixin) { int addr, temp; if (mixin >= 0) addr = mixin; else addr = mixin + 3; addr = ((mix << 3) + (addr >> 2)) << 2; temp = hwread(vortex->mmio, VORTEX_MIX_ENIN + addr); return ((temp >> (mixin & 3)) & 1); } #endif static void vortex_mix_setvolumebyte(vortex_t * vortex, unsigned char mix, unsigned char vol) { int temp; hwwrite(vortex->mmio, VORTEX_MIX_VOL_A + (mix << 2), vol); if (1) { /*if (this_10) */ temp = hwread(vortex->mmio, VORTEX_MIX_VOL_B + (mix << 2)); if ((temp != 0x80) || (vol == 0x80)) return; } hwwrite(vortex->mmio, VORTEX_MIX_VOL_B + (mix << 2), vol); } static void vortex_mix_setinputvolumebyte(vortex_t * vortex, unsigned char mix, int mixin, unsigned char vol) { int temp; hwwrite(vortex->mmio, VORTEX_MIX_INVOL_A + (((mix << 5) + mixin) << 2), vol); if (1) { /* this_10, initialized to 1. */ temp = hwread(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2)); if ((temp != 0x80) || (vol == 0x80)) return; } hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2), vol); } static void vortex_mix_setenablebit(vortex_t * vortex, unsigned char mix, int mixin, int en) { int temp, addr; if (mixin < 0) addr = (mixin + 3); else addr = mixin; addr = ((mix << 3) + (addr >> 2)) << 2; temp = hwread(vortex->mmio, VORTEX_MIX_ENIN + addr); if (en) temp |= (1 << (mixin & 3)); else temp &= ~(1 << (mixin & 3)); /* Mute input. Avoid crackling? */ hwwrite(vortex->mmio, VORTEX_MIX_INVOL_B + (((mix << 5) + mixin) << 2), 0x80); /* Looks like clear buffer. */ hwwrite(vortex->mmio, VORTEX_MIX_SMP + (mixin << 2), 0x0); hwwrite(vortex->mmio, VORTEX_MIX_SMP + 4 + (mixin << 2), 0x0); /* Write enable bit.
*/ hwwrite(vortex->mmio, VORTEX_MIX_ENIN + addr, temp); } static void vortex_mix_killinput(vortex_t * vortex, unsigned char mix, int mixin) { rampchs[mix] &= ~(1 << mixin); vortex_mix_setinputvolumebyte(vortex, mix, mixin, 0x80); mchannels[mix] &= ~(1 << mixin); vortex_mix_setenablebit(vortex, mix, mixin, 0); } static void vortex_mix_enableinput(vortex_t * vortex, unsigned char mix, int mixin) { vortex_mix_killinput(vortex, mix, mixin); if ((mchannels[mix] & (1 << mixin)) == 0) { vortex_mix_setinputvolumebyte(vortex, mix, mixin, 0x80); /*0x80 : mute */ mchannels[mix] |= (1 << mixin); } vortex_mix_setenablebit(vortex, mix, mixin, 1); } static void vortex_mix_disableinput(vortex_t * vortex, unsigned char mix, int channel, int ramp) { if (ramp) { rampchs[mix] |= (1 << channel); // Register callback. //vortex_mix_startrampvolume(vortex); vortex_mix_killinput(vortex, mix, channel); } else vortex_mix_killinput(vortex, mix, channel); } static int vortex_mixer_addWTD(vortex_t * vortex, unsigned char mix, unsigned char ch) { int temp, lifeboat = 0, prev; temp = hwread(vortex->mmio, VORTEX_MIXER_SR); if ((temp & (1 << ch)) == 0) { hwwrite(vortex->mmio, VORTEX_MIXER_CHNBASE + (ch << 2), mix); vortex_mixer_en_sr(vortex, ch); return 1; } prev = VORTEX_MIXER_CHNBASE + (ch << 2); temp = hwread(vortex->mmio, prev); while (temp & 0x10) { prev = VORTEX_MIXER_RTBASE + ((temp & 0xf) << 2); temp = hwread(vortex->mmio, prev); //printk(KERN_INFO "vortex: mixAddWTD: while addr=%x, val=%x\n", prev, temp); if ((++lifeboat) > 0xf) { printk(KERN_ERR "vortex_mixer_addWTD: lifeboat overflow\n"); return 0; } } hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ((temp & 0xf) << 2), mix); hwwrite(vortex->mmio, prev, (temp & 0xf) | 0x10); return 1; } static int vortex_mixer_delWTD(vortex_t * vortex, unsigned char mix, unsigned char ch) { int esp14 = -1, esp18, eax, ebx, edx, ebp, esi = 0; //int esp1f=edi(while)=src, esp10=ch; eax = hwread(vortex->mmio, VORTEX_MIXER_SR); if (((1 << ch) & eax) == 0) { printk(KERN_ERR "mix ALARM %x\n", eax); return 0; } ebp = VORTEX_MIXER_CHNBASE + (ch << 2); esp18 = hwread(vortex->mmio, ebp); if (esp18 & 0x10) { ebx = (esp18 & 0xf); if (mix == ebx) { ebx = VORTEX_MIXER_RTBASE + (mix << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, ebp, edx); hwwrite(vortex->mmio, ebx, 0); } else { //7ad3 edx = hwread(vortex->mmio, VORTEX_MIXER_RTBASE + (ebx << 2)); //printk(KERN_INFO "vortex: mixdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src); while ((edx & 0xf) != mix) { if ((esi) > 0xf) { printk(KERN_ERR "vortex: mixdelWTD: error lifeboat overflow\n"); return 0; } esp14 = ebx; ebx = edx & 0xf; ebp = ebx << 2; edx = hwread(vortex->mmio, VORTEX_MIXER_RTBASE + ebp); //printk(KERN_INFO "vortex: mixdelWTD: while addr=%x, val=%x\n", ebp, edx); esi++; } //7b30 ebp = ebx << 2; if (edx & 0x10) { /* Delete entry in between others */ ebx = VORTEX_MIXER_RTBASE + ((edx & 0xf) << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ebp, edx); hwwrite(vortex->mmio, ebx, 0); //printk(KERN_INFO "vortex mixdelWTD between addr= 0x%x, val= 0x%x\n", ebp, edx); } else { /* Delete last entry */ //7b83 if (esp14 == -1) hwwrite(vortex->mmio, VORTEX_MIXER_CHNBASE + (ch << 2), esp18 & 0xef); else { ebx = (0xffffffe0 & edx) | (0xf & ebx); hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + (esp14 << 2), ebx); //printk(KERN_INFO "vortex mixdelWTD last addr= 0x%x, val= 0x%x\n", esp14, ebx); } hwwrite(vortex->mmio, VORTEX_MIXER_RTBASE + ebp, 0); return 1; } } } else { //printk(KERN_INFO 
"removed last mix\n"); //7be0 vortex_mixer_dis_sr(vortex, ch); hwwrite(vortex->mmio, ebp, 0); } return 1; } static void vortex_mixer_init(vortex_t * vortex) { u32 addr; int x; // FIXME: get rid of this crap. memset(mchannels, 0, NR_MIXOUT * sizeof(int)); memset(rampchs, 0, NR_MIXOUT * sizeof(int)); addr = VORTEX_MIX_SMP + 0x17c; for (x = 0x5f; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } addr = VORTEX_MIX_ENIN + 0x1fc; for (x = 0x7f; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } addr = VORTEX_MIX_SMP + 0x17c; for (x = 0x5f; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } addr = VORTEX_MIX_INVOL_A + 0x7fc; for (x = 0x1ff; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIX_VOL_A + 0x3c; for (x = 0xf; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIX_INVOL_B + 0x7fc; for (x = 0x1ff; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIX_VOL_B + 0x3c; for (x = 0xf; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x80); addr -= 4; } addr = VORTEX_MIXER_RTBASE + (MIXER_RTBASE_SIZE - 1) * 4; for (x = (MIXER_RTBASE_SIZE - 1); x >= 0; x--) { hwwrite(vortex->mmio, addr, 0x0); addr -= 4; } hwwrite(vortex->mmio, VORTEX_MIXER_SR, 0); /* Set clipping ceiling (this may be all wrong). */ /* for (x = 0; x < 0x80; x++) { hwwrite(vortex->mmio, VORTEX_MIXER_CLIP + (x << 2), 0x3ffff); } */ /* call CAsp4Mix__Initialize_CAsp4HwIO____CAsp4Mixer____ Register ISR callback for volume smooth fade out. Maybe this avoids clicks when press "stop" ? */ } /* SRC (CAsp4Src.s and CAsp4SrcBlock) */ static void vortex_src_en_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_SRCBLOCK_SR, hwread(vortex->mmio, VORTEX_SRCBLOCK_SR) | (0x1 << channel)); } static void vortex_src_dis_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_SRCBLOCK_SR, hwread(vortex->mmio, VORTEX_SRCBLOCK_SR) & ~(0x1 << channel)); } static void vortex_src_flushbuffers(vortex_t * vortex, unsigned char src) { int i; for (i = 0x1f; i >= 0; i--) hwwrite(vortex->mmio, VORTEX_SRC_DATA0 + (src << 7) + (i << 2), 0); hwwrite(vortex->mmio, VORTEX_SRC_DATA + (src << 3), 0); hwwrite(vortex->mmio, VORTEX_SRC_DATA + (src << 3) + 4, 0); } static void vortex_src_cleardrift(vortex_t * vortex, unsigned char src) { hwwrite(vortex->mmio, VORTEX_SRC_DRIFT0 + (src << 2), 0); hwwrite(vortex->mmio, VORTEX_SRC_DRIFT1 + (src << 2), 0); hwwrite(vortex->mmio, VORTEX_SRC_DRIFT2 + (src << 2), 1); } static void vortex_src_set_throttlesource(vortex_t * vortex, unsigned char src, int en) { int temp; temp = hwread(vortex->mmio, VORTEX_SRC_SOURCE); if (en) temp |= 1 << src; else temp &= ~(1 << src); hwwrite(vortex->mmio, VORTEX_SRC_SOURCE, temp); } static int vortex_src_persist_convratio(vortex_t * vortex, unsigned char src, int ratio) { int temp, lifeboat = 0; do { hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), ratio); temp = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2)); if ((++lifeboat) > 0x9) { printk(KERN_ERR "Vortex: Src cvr fail\n"); break; } } while (temp != ratio); return temp; } #if 0 static void vortex_src_slowlock(vortex_t * vortex, unsigned char src) { int temp; hwwrite(vortex->mmio, VORTEX_SRC_DRIFT2 + (src << 2), 1); hwwrite(vortex->mmio, VORTEX_SRC_DRIFT0 + (src << 2), 0); temp = hwread(vortex->mmio, VORTEX_SRC_U0 + (src << 2)); if (temp & 0x200) hwwrite(vortex->mmio, VORTEX_SRC_U0 + (src << 2), temp & ~0x200L); } static void vortex_src_change_convratio(vortex_t * vortex, unsigned char src, int ratio) { 
int temp, a; if ((ratio & 0x10000) && (ratio != 0x10000)) { if (ratio & 0x3fff) a = (0x11 - ((ratio >> 0xe) & 0x3)) - 1; else a = (0x11 - ((ratio >> 0xe) & 0x3)) - 2; } else a = 0xc; temp = hwread(vortex->mmio, VORTEX_SRC_U0 + (src << 2)); if (((temp >> 4) & 0xf) != a) hwwrite(vortex->mmio, VORTEX_SRC_U0 + (src << 2), (temp & 0xf) | ((a & 0xf) << 4)); vortex_src_persist_convratio(vortex, src, ratio); } static int vortex_src_checkratio(vortex_t * vortex, unsigned char src, unsigned int desired_ratio) { int hw_ratio, lifeboat = 0; hw_ratio = hwread(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2)); while (hw_ratio != desired_ratio) { hwwrite(vortex->mmio, VORTEX_SRC_CONVRATIO + (src << 2), desired_ratio); if ((lifeboat++) > 15) { printk(KERN_ERR "Vortex: could not set src-%d from %d to %d\n", src, hw_ratio, desired_ratio); break; } } return hw_ratio; } #endif /* Objective: Set sample rate for given SRC module. Arguments: card: pointer to vortex_t struct. src: Integer index of the SRC module. cr: Current sample rate conversion factor. b: unknown 16-bit value. sweep: Enable sample rate fade from cr toward tr (flag). dirplay: 1: playback, 0: recording. sl: Slow Lock flag. tr: Target sample rate conversion. thsource: Throttle source flag (no idea what that means). */ static void vortex_src_setupchannel(vortex_t * card, unsigned char src, unsigned int cr, unsigned int b, int sweep, int d, int dirplay, int sl, unsigned int tr, int thsource) { // noplayback: d=2,4,7,0xa,0xb when using first 2 src's. // c: enables pitch sweep. // looks like g is c related. Maybe g is a sweep parameter ? // g = cvr // dirplay: 0 = recording, 1 = playback // d = src hw index. int esi, ebp = 0, esp10; vortex_src_flushbuffers(card, src); if (sweep) { if ((tr & 0x10000) && (tr != 0x10000)) { tr = 0; esi = 0x7; } else { if ((((short)tr) < 0) && (tr != 0x8000)) { tr = 0; esi = 0x8; } else { tr = 1; esi = 0xc; } } } else { if ((cr & 0x10000) && (cr != 0x10000)) { tr = 0; /*ebx = 0 */ esi = 0x11 - ((cr >> 0xe) & 7); if (cr & 0x3fff) esi -= 1; else esi -= 2; } else { tr = 1; esi = 0xc; } } vortex_src_cleardrift(card, src); vortex_src_set_throttlesource(card, src, thsource); if ((dirplay == 0) && (sweep == 0)) { if (tr) esp10 = 0xf; else esp10 = 0xc; ebp = 0; } else { if (tr) ebp = 0xf; else ebp = 0xc; esp10 = 0; } hwwrite(card->mmio, VORTEX_SRC_U0 + (src << 2), (sl << 0x9) | (sweep << 0x8) | ((esi & 0xf) << 4) | d); /* 0xc0 esi=0xc c=f=0 d=0 */ vortex_src_persist_convratio(card, src, cr); hwwrite(card->mmio, VORTEX_SRC_U1 + (src << 2), b & 0xffff); /* 0 b=0 */ hwwrite(card->mmio, VORTEX_SRC_U2 + (src << 2), (tr << 0x11) | (dirplay << 0x10) | (ebp << 0x8) | esp10); /* 0x30f00 e=g=1 esp10=0 ebp=f */ //printk(KERN_INFO "vortex: SRC %d, d=0x%x, esi=0x%x, esp10=0x%x, ebp=0x%x\n", src, d, esi, esp10, ebp); } static void vortex_srcblock_init(vortex_t * vortex) { u32 addr; int x; hwwrite(vortex->mmio, VORTEX_SRC_SOURCESIZE, 0x1ff); /* for (x=0; x<0x10; x++) { vortex_src_init(&vortex_src[x], x); } */ //addr = 0xcc3c; //addr = 0x26c3c; addr = VORTEX_SRC_RTBASE + 0x3c; for (x = 0xf; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } //addr = 0xcc94; //addr = 0x26c94; addr = VORTEX_SRC_CHNBASE + 0x54; for (x = 0x15; x >= 0; x--) { hwwrite(vortex->mmio, addr, 0); addr -= 4; } } static int vortex_src_addWTD(vortex_t * vortex, unsigned char src, unsigned char ch) { int temp, lifeboat = 0, prev; // esp13 = src temp = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR); if ((temp & (1 << ch)) == 0) { hwwrite(vortex->mmio, VORTEX_SRC_CHNBASE + (ch <<
2), src); vortex_src_en_sr(vortex, ch); return 1; } prev = VORTEX_SRC_CHNBASE + (ch << 2); /*ebp */ temp = hwread(vortex->mmio, prev); //while (temp & NR_SRC) { while (temp & 0x10) { prev = VORTEX_SRC_RTBASE + ((temp & 0xf) << 2); /*esp12 */ //prev = VORTEX_SRC_RTBASE + ((temp & (NR_SRC-1)) << 2); /*esp12*/ temp = hwread(vortex->mmio, prev); //printk(KERN_INFO "vortex: srcAddWTD: while addr=%x, val=%x\n", prev, temp); if ((++lifeboat) > 0xf) { printk(KERN_ERR "vortex_src_addWTD: lifeboat overflow\n"); return 0; } } hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ((temp & 0xf) << 2), src); //hwwrite(vortex->mmio, prev, (temp & (NR_SRC-1)) | NR_SRC); hwwrite(vortex->mmio, prev, (temp & 0xf) | 0x10); return 1; } static int vortex_src_delWTD(vortex_t * vortex, unsigned char src, unsigned char ch) { int esp14 = -1, esp18, eax, ebx, edx, ebp, esi = 0; //int esp1f=edi(while)=src, esp10=ch; eax = hwread(vortex->mmio, VORTEX_SRCBLOCK_SR); if (((1 << ch) & eax) == 0) { printk(KERN_ERR "src alarm\n"); return 0; } ebp = VORTEX_SRC_CHNBASE + (ch << 2); esp18 = hwread(vortex->mmio, ebp); if (esp18 & 0x10) { ebx = (esp18 & 0xf); if (src == ebx) { ebx = VORTEX_SRC_RTBASE + (src << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, ebp, edx); hwwrite(vortex->mmio, ebx, 0); } else { //7ad3 edx = hwread(vortex->mmio, VORTEX_SRC_RTBASE + (ebx << 2)); //printk(KERN_INFO "vortex: srcdelWTD: 1 addr=%x, val=%x, src=%x\n", ebx, edx, src); while ((edx & 0xf) != src) { if ((esi) > 0xf) { printk ("vortex: srcdelWTD: error, lifeboat overflow\n"); return 0; } esp14 = ebx; ebx = edx & 0xf; ebp = ebx << 2; edx = hwread(vortex->mmio, VORTEX_SRC_RTBASE + ebp); //printk(KERN_INFO "vortex: srcdelWTD: while addr=%x, val=%x\n", ebp, edx); esi++; } //7b30 ebp = ebx << 2; if (edx & 0x10) { /* Delete entry in between others */ ebx = VORTEX_SRC_RTBASE + ((edx & 0xf) << 2); edx = hwread(vortex->mmio, ebx); //7b60 hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ebp, edx); hwwrite(vortex->mmio, ebx, 0); //printk(KERN_INFO "vortex srcdelWTD between addr= 0x%x, val= 0x%x\n", ebp, edx); } else { /* Delete last entry */ //7b83 if (esp14 == -1) hwwrite(vortex->mmio, VORTEX_SRC_CHNBASE + (ch << 2), esp18 & 0xef); else { ebx = (0xffffffe0 & edx) | (0xf & ebx); hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + (esp14 << 2), ebx); //printk(KERN_INFO"vortex srcdelWTD last addr= 0x%x, val= 0x%x\n", esp14, ebx); } hwwrite(vortex->mmio, VORTEX_SRC_RTBASE + ebp, 0); return 1; } } } else { //7be0 vortex_src_dis_sr(vortex, ch); hwwrite(vortex->mmio, ebp, 0); } return 1; } /*FIFO*/ static void vortex_fifo_clearadbdata(vortex_t * vortex, int fifo, int x) { for (x--; x >= 0; x--) hwwrite(vortex->mmio, VORTEX_FIFO_ADBDATA + (((fifo << FIFO_SIZE_BITS) + x) << 2), 0); } #if 0 static void vortex_fifo_adbinitialize(vortex_t * vortex, int fifo, int j) { vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); #ifdef CHIP_AU8820 hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xb))); #else hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xc))); #endif } #endif static void vortex_fifo_setadbvalid(vortex_t * vortex, int fifo, int en) { hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), (hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)) & 0xffffffef) | ((1 & en) << 4) | FIFO_U1); } static void vortex_fifo_setadbctrl(vortex_t * vortex, int fifo, int b, int priority, int empty, int valid, int f) { int temp, lifeboat = 0; //int this_8[NR_ADB] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* 
position */ int this_4 = 0x2; /* f seems priority related. * CAsp4AdbDma::SetPriority is the only place that calls SetAdbCtrl with f set to 1 * everywhere else it is set to 0. It seems, however, that CAsp4AdbDma::SetPriority * is never called, thus the f related bits remain a mystery for now. */ do { temp = hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)); if (lifeboat++ > 0xbb8) { printk(KERN_ERR "Vortex: vortex_fifo_setadbctrl fail\n"); break; } } while (temp & FIFO_RDONLY); // AU8830 seems to take some special care about fifo content (data). // But I'm just too lazy to translate that :) if (valid) { if ((temp & FIFO_VALID) == 0) { //this_8[fifo] = 0; vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); // this_4 #ifdef CHIP_AU8820 temp = (this_4 & 0x1f) << 0xb; #else temp = (this_4 & 0x3f) << 0xc; #endif temp = (temp & 0xfffffffd) | ((b & 1) << 1); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp |= FIFO_U1; temp = (temp & 0xffffffdf) | ((empty & 1) << 5); #ifdef CHIP_AU8820 temp = (temp & 0xfffbffff) | ((f & 1) << 0x12); #endif #ifdef CHIP_AU8830 temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b); temp = (temp & 0xefffffff) | ((f & 1) << 0x1c); #endif #ifdef CHIP_AU8810 temp = (temp & 0xfeffffff) | ((f & 1) << 0x18); temp = (temp & 0xfdffffff) | ((f & 1) << 0x19); #endif } } else { if (temp & FIFO_VALID) { #ifdef CHIP_AU8820 temp = ((f & 1) << 0x12) | (temp & 0xfffbffef); #endif #ifdef CHIP_AU8830 temp = ((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS; #endif #ifdef CHIP_AU8810 temp = ((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS; #endif } else /*if (this_8[fifo]) */ vortex_fifo_clearadbdata(vortex, fifo, FIFO_SIZE); } hwwrite(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2), temp); hwread(vortex->mmio, VORTEX_FIFO_ADBCTRL + (fifo << 2)); } #ifndef CHIP_AU8810 static void vortex_fifo_clearwtdata(vortex_t * vortex, int fifo, int x) { if (x < 1) return; for (x--; x >= 0; x--) hwwrite(vortex->mmio, VORTEX_FIFO_WTDATA + (((fifo << FIFO_SIZE_BITS) + x) << 2), 0); } static void vortex_fifo_wtinitialize(vortex_t * vortex, int fifo, int j) { vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); #ifdef CHIP_AU8820 hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xb))); #else hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), (FIFO_U1 | ((j & FIFO_MASK) << 0xc))); #endif } static void vortex_fifo_setwtvalid(vortex_t * vortex, int fifo, int en) { hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), (hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)) & 0xffffffef) | ((en & 1) << 4) | FIFO_U1); } static void vortex_fifo_setwtctrl(vortex_t * vortex, int fifo, int ctrl, int priority, int empty, int valid, int f) { int temp = 0, lifeboat = 0; int this_4 = 2; do { temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); if (lifeboat++ > 0xbb8) { printk(KERN_ERR "Vortex: vortex_fifo_setwtctrl fail\n"); break; } } while (temp & FIFO_RDONLY); if (valid) { if ((temp & FIFO_VALID) == 0) { vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); // this_4 #ifdef CHIP_AU8820 temp = (this_4 & 0x1f) << 0xb; #else temp = (this_4 & 0x3f) << 0xc; #endif temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp |= FIFO_U1; temp = (temp & 0xffffffdf) | ((empty & 1) << 5); #ifdef CHIP_AU8820 temp = (temp & 0xfffbffff) | ((f & 1) << 0x12); #endif #ifdef CHIP_AU8830 temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b);
temp = (temp & 0xefffffff) | ((f & 1) << 0x1c); #endif #ifdef CHIP_AU8810 temp = (temp & 0xfeffffff) | ((f & 1) << 0x18); temp = (temp & 0xfdffffff) | ((f & 1) << 0x19); #endif } } else { if (temp & FIFO_VALID) { #ifdef CHIP_AU8820 temp = ((f & 1) << 0x12) | (temp & 0xfffbffef); #endif #ifdef CHIP_AU8830 temp = ((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS; #endif #ifdef CHIP_AU8810 temp = ((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS; #endif } else /*if (this_8[fifo]) */ vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); } hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); /* do { temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); if (lifeboat++ > 0xbb8) { printk(KERN_ERR "Vortex: vortex_fifo_setwtctrl fail (hanging)\n"); break; } } while ((temp & FIFO_RDONLY)&&(temp & FIFO_VALID)&&(temp != 0xFFFFFFFF)); if (valid) { if (temp & FIFO_VALID) { temp = 0x40000; //temp |= 0x08000000; //temp |= 0x10000000; //temp |= 0x04000000; //temp |= 0x00400000; temp |= 0x1c400000; temp &= 0xFFFFFFF3; temp &= 0xFFFFFFEF; temp |= (valid & 1) << 4; hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); return; } else { vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); return; } } else { temp &= 0xffffffef; temp |= 0x08000000; temp |= 0x10000000; temp |= 0x04000000; temp |= 0x00400000; hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); temp = hwread(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2)); //((temp >> 6) & 0x3f) priority = 0; if (((temp & 0x0fc0) ^ ((temp >> 6) & 0x0fc0)) & 0FFFFFFC0) vortex_fifo_clearwtdata(vortex, fifo, FIFO_SIZE); valid = 0xfb; temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1); temp = (temp & 0xfffdffff) | ((f & 1) << 0x11); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp = (temp & 0xffffffdf) | ((empty & 1) << 5); hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); } */ /* temp = (temp & 0xfffffffd) | ((ctrl & 1) << 1); temp = (temp & 0xfffdffff) | ((f & 1) << 0x11); temp = (temp & 0xfffffff3) | ((priority & 3) << 2); temp = (temp & 0xffffffef) | ((valid & 1) << 4); temp = (temp & 0xffffffdf) | ((empty & 1) << 5); #ifdef FIFO_BITS temp = temp | FIFO_BITS | 40000; #endif // 0x1c440010, 0x1c400000 hwwrite(vortex->mmio, VORTEX_FIFO_WTCTRL + (fifo << 2), temp); */ } #endif static void vortex_fifo_init(vortex_t * vortex) { int x; u32 addr; /* ADB DMA channels fifos. */ addr = VORTEX_FIFO_ADBCTRL + ((NR_ADB - 1) * 4); for (x = NR_ADB - 1; x >= 0; x--) { hwwrite(vortex->mmio, addr, (FIFO_U0 | FIFO_U1)); if (hwread(vortex->mmio, addr) != (FIFO_U0 | FIFO_U1)) printk(KERN_ERR "bad adb fifo reset!"); vortex_fifo_clearadbdata(vortex, x, FIFO_SIZE); addr -= 4; } #ifndef CHIP_AU8810 /* WT DMA channels fifos. */ addr = VORTEX_FIFO_WTCTRL + ((NR_WT - 1) * 4); for (x = NR_WT - 1; x >= 0; x--) { hwwrite(vortex->mmio, addr, FIFO_U0); if (hwread(vortex->mmio, addr) != FIFO_U0) printk(KERN_ERR "bad wt fifo reset (0x%08x, 0x%08x)!\n", addr, hwread(vortex->mmio, addr)); vortex_fifo_clearwtdata(vortex, x, FIFO_SIZE); addr -= 4; } #endif /* trigger... 
*/ #ifdef CHIP_AU8820 hwwrite(vortex->mmio, 0xf8c0, 0xd03); //0x0843 0xd6b #else #ifdef CHIP_AU8830 hwwrite(vortex->mmio, 0x17000, 0x61); /* wt a */ hwwrite(vortex->mmio, 0x17004, 0x61); /* wt b */ #endif hwwrite(vortex->mmio, 0x17008, 0x61); /* adb */ #endif } /* ADBDMA */ static void vortex_adbdma_init(vortex_t * vortex) { } static void vortex_adbdma_setfirstbuffer(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); } static void vortex_adbdma_setstartbuffer(vortex_t * vortex, int adbdma, int sb) { stream_t *dma = &vortex->dma_adb[adbdma]; //hwwrite(vortex->mmio, VORTEX_ADBDMA_START + (adbdma << 2), sb << (((NR_ADB-1)-((adbdma&0xf)*2)))); hwwrite(vortex->mmio, VORTEX_ADBDMA_START + (adbdma << 2), sb << ((0xf - (adbdma & 0xf)) * 2)); dma->period_real = dma->period_virt = sb; } static void vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma, int psize, int count) { stream_t *dma = &vortex->dma_adb[adbdma]; dma->period_bytes = psize; dma->nr_periods = count; dma->cfg0 = 0; dma->cfg1 = 0; switch (count) { /* Four or more pages */ default: case 4: dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize - 1); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0xc, snd_pcm_sgbuf_get_addr(dma->substream, psize * 3)); /* 3 pages */ case 3: dma->cfg0 |= 0x12000000; dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x8, snd_pcm_sgbuf_get_addr(dma->substream, psize * 2)); /* 2 pages */ case 2: dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize - 1); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x4, snd_pcm_sgbuf_get_addr(dma->substream, psize)); /* 1 page */ case 1: dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (adbdma << 4), snd_pcm_sgbuf_get_addr(dma->substream, 0)); break; } /* printk(KERN_DEBUG "vortex: cfg0 = 0x%x\nvortex: cfg1=0x%x\n", dma->cfg0, dma->cfg1); */ hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG0 + (adbdma << 3), dma->cfg0); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFCFG1 + (adbdma << 3), dma->cfg1); vortex_adbdma_setfirstbuffer(vortex, adbdma); vortex_adbdma_setstartbuffer(vortex, adbdma, 0); } static void vortex_adbdma_setmode(vortex_t * vortex, int adbdma, int ie, int dir, int fmt, int d, u32 offset) { stream_t *dma = &vortex->dma_adb[adbdma]; dma->dma_unknown = d; dma->dma_ctrl = ((offset & OFFSET_MASK) | (dma->dma_ctrl & ~OFFSET_MASK)); /* Enable PCMOUT interrupts. 
*/ dma->dma_ctrl = (dma->dma_ctrl & ~IE_MASK) | ((ie << IE_SHIFT) & IE_MASK); dma->dma_ctrl = (dma->dma_ctrl & ~DIR_MASK) | ((dir << DIR_SHIFT) & DIR_MASK); dma->dma_ctrl = (dma->dma_ctrl & ~FMT_MASK) | ((fmt << FMT_SHIFT) & FMT_MASK); hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); hwread(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2)); } static int vortex_adbdma_bufshift(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int page, p, pp, delta, i; page = (hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)) & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { delta = (page - dma->period_real); if (delta < 0) delta += dma->nr_periods; } if (delta == 0) return 0; /* refresh hw page table */ if (dma->nr_periods > 4) { for (i = 0; i < delta; i++) { /* p: audio buffer page index */ p = dma->period_virt + i + 4; if (p >= dma->nr_periods) p -= dma->nr_periods; /* pp: hardware DMA page index. */ pp = dma->period_real + i; if (pp >= 4) pp -= 4; //hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE+(((adbdma << 2)+pp) << 2), dma->table[p].addr); hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2), snd_pcm_sgbuf_get_addr(dma->substream, dma->period_bytes * p)); /* Force write thru cache. */ hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2)); } } dma->period_virt += delta; dma->period_real = page; if (dma->period_virt >= dma->nr_periods) dma->period_virt -= dma->nr_periods; if (delta != 1) printk(KERN_INFO "vortex: %d virt=%d, real=%d, delta=%d\n", adbdma, dma->period_virt, dma->period_real, delta); return delta; } static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int p, pp, i; /* refresh hw page table */ for (i=0 ; i < 4 && i < dma->nr_periods; i++) { /* p: audio buffer page index */ p = dma->period_virt + i; if (p >= dma->nr_periods) p -= dma->nr_periods; /* pp: hardware DMA page index. */ pp = dma->period_real + i; if (dma->nr_periods < 4) { if (pp >= dma->nr_periods) pp -= dma->nr_periods; } else { if (pp >= 4) pp -= 4; } hwwrite(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2), snd_pcm_sgbuf_get_addr(dma->substream, dma->period_bytes * p)); /* Force write thru cache. */ hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2)+pp) << 2)); } } static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int temp; temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); return temp; } static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) { int this_8 = 0 /*empty */ , this_4 = 0 /*priority */ ; stream_t *dma = &vortex->dma_adb[adbdma]; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setadbvalid(vortex, adbdma, dma->fifo_enabled ? 1 : 0); break; case FIFO_STOP: this_8 = 1; hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 
1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_adbdma_resumefifo(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int this_8 = 1, this_4 = 0; switch (dma->fifo_status) { case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_adbdma_pausefifo(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, 0, 0); break; case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_ADBDMA_CTRL + (adbdma << 2), dma->dma_ctrl); vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, 0, 0); break; } dma->fifo_status = FIFO_PAUSE; } #if 0 // Using pause instead static void vortex_adbdma_stopfifo(vortex_t * vortex, int adbdma) { stream_t *dma = &vortex->dma_adb[adbdma]; int this_4 = 0, this_8 = 0; if (dma->fifo_status == FIFO_START) vortex_fifo_setadbctrl(vortex, adbdma, dma->dma_unknown, this_4, this_8, 0, 0); else if (dma->fifo_status == FIFO_STOP) return; dma->fifo_status = FIFO_STOP; dma->fifo_enabled = 0; } #endif /* WTDMA */ #ifndef CHIP_AU8810 static void vortex_wtdma_setfirstbuffer(vortex_t * vortex, int wtdma) { //int this_7c=dma_ctrl; stream_t *dma = &vortex->dma_wt[wtdma]; hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); } static void vortex_wtdma_setstartbuffer(vortex_t * vortex, int wtdma, int sb) { stream_t *dma = &vortex->dma_wt[wtdma]; //hwwrite(vortex->mmio, VORTEX_WTDMA_START + (wtdma << 2), sb << ((0x1f-(wtdma&0xf)*2))); hwwrite(vortex->mmio, VORTEX_WTDMA_START + (wtdma << 2), sb << ((0xf - (wtdma & 0xf)) * 2)); dma->period_real = dma->period_virt = sb; } static void vortex_wtdma_setbuffers(vortex_t * vortex, int wtdma, int psize, int count) { stream_t *dma = &vortex->dma_wt[wtdma]; dma->period_bytes = psize; dma->nr_periods = count; dma->cfg0 = 0; dma->cfg1 = 0; switch (count) { /* Four or more pages */ default: case 4: dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize-1); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0xc, snd_pcm_sgbuf_get_addr(dma->substream, psize * 3)); /* 3 pages */ case 3: dma->cfg0 |= 0x12000000; dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x8, snd_pcm_sgbuf_get_addr(dma->substream, psize * 2)); /* 2 pages */ case 2: dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize-1); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x4, snd_pcm_sgbuf_get_addr(dma->substream, psize)); /* 1 page */ case 1: dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4), snd_pcm_sgbuf_get_addr(dma->substream, 0)); break; } hwwrite(vortex->mmio, VORTEX_WTDMA_BUFCFG0 + (wtdma << 3), dma->cfg0); hwwrite(vortex->mmio, VORTEX_WTDMA_BUFCFG1 + (wtdma << 3), dma->cfg1); vortex_wtdma_setfirstbuffer(vortex, wtdma); vortex_wtdma_setstartbuffer(vortex, wtdma, 0); } static void vortex_wtdma_setmode(vortex_t * vortex, int wtdma, int ie, int fmt, int d, /*int e, */ u32 offset) { stream_t *dma = &vortex->dma_wt[wtdma]; //dma->this_08 = e; dma->dma_unknown = d; 
dma->dma_ctrl = 0; dma->dma_ctrl = ((offset & OFFSET_MASK) | (dma->dma_ctrl & ~OFFSET_MASK)); /* PCMOUT interrupt */ dma->dma_ctrl = (dma->dma_ctrl & ~IE_MASK) | ((ie << IE_SHIFT) & IE_MASK); /* Always playback. */ dma->dma_ctrl |= (1 << DIR_SHIFT); /* Audio Format */ dma->dma_ctrl = (dma->dma_ctrl & FMT_MASK) | ((fmt << FMT_SHIFT) & FMT_MASK); /* Write into hardware */ hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); } static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int page, p, pp, delta, i; page = (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) & WT_SUBBUF_MASK) >> WT_SUBBUF_SHIFT; if (dma->nr_periods >= 4) delta = (page - dma->period_real) & 3; else { delta = (page - dma->period_real); if (delta < 0) delta += dma->nr_periods; } if (delta == 0) return 0; /* refresh hw page table */ if (dma->nr_periods > 4) { for (i = 0; i < delta; i++) { /* p: audio buffer page index */ p = dma->period_virt + i + 4; if (p >= dma->nr_periods) p -= dma->nr_periods; /* pp: hardware DMA page index. */ pp = dma->period_real + i; if (pp >= 4) pp -= 4; hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (((wtdma << 2) + pp) << 2), snd_pcm_sgbuf_get_addr(dma->substream, dma->period_bytes * p)); /* Force write thru cache. */ hwread(vortex->mmio, VORTEX_WTDMA_BUFBASE + (((wtdma << 2) + pp) << 2)); } } dma->period_virt += delta; if (dma->period_virt >= dma->nr_periods) dma->period_virt -= dma->nr_periods; dma->period_real = page; if (delta != 1) printk(KERN_WARNING "vortex: wt virt = %d, delta = %d\n", dma->period_virt, delta); return delta; } #if 0 static void vortex_wtdma_getposition(vortex_t * vortex, int wtdma, int *subbuf, int *pos) { int temp; temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)); *subbuf = (temp >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK; *pos = temp & POS_MASK; } static int vortex_wtdma_getcursubuffer(vortex_t * vortex, int wtdma) { return ((hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) >> POS_SHIFT) & POS_MASK); } #endif static int inline vortex_wtdma_getlinearpos(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int temp; temp = hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)); temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); return temp; } static void vortex_wtdma_startfifo(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setwtvalid(vortex, wtdma, dma->fifo_enabled ? 1 : 0); break; case FIFO_STOP: this_8 = 1; hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_wtdma_resumefifo(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 1 : 0, 0); break; case FIFO_PAUSE: vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, dma->fifo_enabled ? 
1 : 0, 0); break; } dma->fifo_status = FIFO_START; } static void vortex_wtdma_pausefifo(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int this_8 = 0, this_4 = 0; switch (dma->fifo_status) { case FIFO_START: vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, 0, 0); break; case FIFO_STOP: hwwrite(vortex->mmio, VORTEX_WTDMA_CTRL + (wtdma << 2), dma->dma_ctrl); vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, 0, 0); break; } dma->fifo_status = FIFO_PAUSE; } static void vortex_wtdma_stopfifo(vortex_t * vortex, int wtdma) { stream_t *dma = &vortex->dma_wt[wtdma]; int this_4 = 0, this_8 = 0; if (dma->fifo_status == FIFO_START) vortex_fifo_setwtctrl(vortex, wtdma, dma->dma_unknown, this_4, this_8, 0, 0); else if (dma->fifo_status == FIFO_STOP) return; dma->fifo_status = FIFO_STOP; dma->fifo_enabled = 0; } #endif /* ADB Routes */ typedef int ADBRamLink; static void vortex_adb_init(vortex_t * vortex) { int i; /* it looks like we are writing more than we need to... * if we write what we are supposed to it breaks things... */ hwwrite(vortex->mmio, VORTEX_ADB_SR, 0); for (i = 0; i < VORTEX_ADB_RTBASE_COUNT; i++) hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (i << 2), hwread(vortex->mmio, VORTEX_ADB_RTBASE + (i << 2)) | ROUTE_MASK); for (i = 0; i < VORTEX_ADB_CHNBASE_COUNT; i++) { hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (i << 2), hwread(vortex->mmio, VORTEX_ADB_CHNBASE + (i << 2)) | ROUTE_MASK); } } static void vortex_adb_en_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_ADB_SR, hwread(vortex->mmio, VORTEX_ADB_SR) | (0x1 << channel)); } static void vortex_adb_dis_sr(vortex_t * vortex, int channel) { hwwrite(vortex->mmio, VORTEX_ADB_SR, hwread(vortex->mmio, VORTEX_ADB_SR) & ~(0x1 << channel)); } static void vortex_adb_addroutes(vortex_t * vortex, unsigned char channel, ADBRamLink * route, int rnum) { int temp, prev, lifeboat = 0; if ((rnum <= 0) || (route == NULL)) return; /* Write last routes. */ rnum--; hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + ((route[rnum] & ADB_MASK) << 2), ROUTE_MASK); while (rnum > 0) { hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + ((route[rnum - 1] & ADB_MASK) << 2), route[rnum]); rnum--; } /* Write first route. */ temp = hwread(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2)) & ADB_MASK; if (temp == ADB_MASK) { /* First entry on this channel. */ hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2), route[0]); vortex_adb_en_sr(vortex, channel); return; } /* Not first entry on this channel. Need to link. */ do { prev = temp; temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2)) & ADB_MASK; if ((lifeboat++) > ADB_MASK) { printk(KERN_ERR "vortex_adb_addroutes: unending route! 0x%x\n", *route); return; } } while (temp != ADB_MASK); hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), route[0]); } static void vortex_adb_delroutes(vortex_t * vortex, unsigned char channel, ADBRamLink route0, ADBRamLink route1) { int temp, lifeboat = 0, prev; /* Find route. */ temp = hwread(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2)) & ADB_MASK; if (temp == (route0 & ADB_MASK)) { temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + ((route1 & ADB_MASK) << 2)); if ((temp & ADB_MASK) == ADB_MASK) vortex_adb_dis_sr(vortex, channel); hwwrite(vortex->mmio, VORTEX_ADB_CHNBASE + (channel << 2), temp); return; } do { prev = temp; temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2)) & ADB_MASK; if (((lifeboat++) > ADB_MASK) || (temp == ADB_MASK)) { printk(KERN_ERR "vortex_adb_delroutes: route not found! 
0x%x\n", route0); return; } } while (temp != (route0 & ADB_MASK)); temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2)); if ((temp & ADB_MASK) == route1) temp = hwread(vortex->mmio, VORTEX_ADB_RTBASE + (temp << 2)); /* Make bridge over deleted route. */ hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), temp); } static void vortex_route(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char dest) { ADBRamLink route; route = ((source & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK); if (en) { vortex_adb_addroutes(vortex, channel, &route, 1); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= OFFSET_SRCOUT)) vortex_src_addWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= OFFSET_MIXOUT)) vortex_mixer_addWTD(vortex, (source - OFFSET_MIXOUT), channel); } else { vortex_adb_delroutes(vortex, channel, route, route); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= OFFSET_SRCOUT)) vortex_src_delWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= OFFSET_MIXOUT)) vortex_mixer_delWTD(vortex, (source - OFFSET_MIXOUT), channel); } } #if 0 static void vortex_routes(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char dest0, unsigned char dest1) { ADBRamLink route[2]; route[0] = ((source & ADB_MASK) << ADB_SHIFT) | (dest0 & ADB_MASK); route[1] = ((source & ADB_MASK) << ADB_SHIFT) | (dest1 & ADB_MASK); if (en) { vortex_adb_addroutes(vortex, channel, route, 2); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= (OFFSET_SRCOUT))) vortex_src_addWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= (OFFSET_MIXOUT))) vortex_mixer_addWTD(vortex, (source - OFFSET_MIXOUT), channel); } else { vortex_adb_delroutes(vortex, channel, route[0], route[1]); if ((source < (OFFSET_SRCOUT + NR_SRC)) && (source >= (OFFSET_SRCOUT))) vortex_src_delWTD(vortex, (source - OFFSET_SRCOUT), channel); else if ((source < (OFFSET_MIXOUT + NR_MIXOUT)) && (source >= (OFFSET_MIXOUT))) vortex_mixer_delWTD(vortex, (source - OFFSET_MIXOUT), channel); } } #endif /* Route two sources to same target. Sources must be of same class !!! 
*/ static void vortex_routeLRT(vortex_t * vortex, int en, unsigned char ch, unsigned char source0, unsigned char source1, unsigned char dest) { ADBRamLink route[2]; route[0] = ((source0 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK); route[1] = ((source1 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK); if (dest < 0x10) route[1] = (route[1] & ~ADB_MASK) | (dest + 0x20); /* fifo A */ if (en) { vortex_adb_addroutes(vortex, ch, route, 2); if ((source0 < (OFFSET_SRCOUT + NR_SRC)) && (source0 >= OFFSET_SRCOUT)) { vortex_src_addWTD(vortex, (source0 - OFFSET_SRCOUT), ch); vortex_src_addWTD(vortex, (source1 - OFFSET_SRCOUT), ch); } else if ((source0 < (OFFSET_MIXOUT + NR_MIXOUT)) && (source0 >= OFFSET_MIXOUT)) { vortex_mixer_addWTD(vortex, (source0 - OFFSET_MIXOUT), ch); vortex_mixer_addWTD(vortex, (source1 - OFFSET_MIXOUT), ch); } } else { vortex_adb_delroutes(vortex, ch, route[0], route[1]); if ((source0 < (OFFSET_SRCOUT + NR_SRC)) && (source0 >= OFFSET_SRCOUT)) { vortex_src_delWTD(vortex, (source0 - OFFSET_SRCOUT), ch); vortex_src_delWTD(vortex, (source1 - OFFSET_SRCOUT), ch); } else if ((source0 < (OFFSET_MIXOUT + NR_MIXOUT)) && (source0 >= OFFSET_MIXOUT)) { vortex_mixer_delWTD(vortex, (source0 - OFFSET_MIXOUT), ch); vortex_mixer_delWTD(vortex, (source1 - OFFSET_MIXOUT), ch); } } } /* Connection stuff */ // Connect adbdma to src('s). static void vortex_connection_adbdma_src(vortex_t * vortex, int en, unsigned char ch, unsigned char adbdma, unsigned char src) { vortex_route(vortex, en, ch, ADB_DMA(adbdma), ADB_SRCIN(src)); } // Connect SRC to mixin. static void vortex_connection_src_mixin(vortex_t * vortex, int en, unsigned char channel, unsigned char src, unsigned char mixin) { vortex_route(vortex, en, channel, ADB_SRCOUT(src), ADB_MIXIN(mixin)); } // Connect mixin with mix output. static void vortex_connection_mixin_mix(vortex_t * vortex, int en, unsigned char mixin, unsigned char mix, int a) { if (en) { vortex_mix_enableinput(vortex, mix, mixin); vortex_mix_setinputvolumebyte(vortex, mix, mixin, MIX_DEFIGAIN); // added to original code. } else vortex_mix_disableinput(vortex, mix, mixin, a); } // Connect absolut address to mixin. static void vortex_connection_adb_mixin(vortex_t * vortex, int en, unsigned char channel, unsigned char source, unsigned char mixin) { vortex_route(vortex, en, channel, source, ADB_MIXIN(mixin)); } static void vortex_connection_src_adbdma(vortex_t * vortex, int en, unsigned char ch, unsigned char src, unsigned char adbdma) { vortex_route(vortex, en, ch, ADB_SRCOUT(src), ADB_DMA(adbdma)); } static void vortex_connection_src_src_adbdma(vortex_t * vortex, int en, unsigned char ch, unsigned char src0, unsigned char src1, unsigned char adbdma) { vortex_routeLRT(vortex, en, ch, ADB_SRCOUT(src0), ADB_SRCOUT(src1), ADB_DMA(adbdma)); } // mix to absolut address. static void vortex_connection_mix_adb(vortex_t * vortex, int en, unsigned char ch, unsigned char mix, unsigned char dest) { vortex_route(vortex, en, ch, ADB_MIXOUT(mix), dest); vortex_mix_setvolumebyte(vortex, mix, MIX_DEFOGAIN); // added to original code. } // mixer to src. static void vortex_connection_mix_src(vortex_t * vortex, int en, unsigned char ch, unsigned char mix, unsigned char src) { vortex_route(vortex, en, ch, ADB_MIXOUT(mix), ADB_SRCIN(src)); vortex_mix_setvolumebyte(vortex, mix, MIX_DEFOGAIN); // added to original code. 
} #if 0 static void vortex_connection_adbdma_src_src(vortex_t * vortex, int en, unsigned char channel, unsigned char adbdma, unsigned char src0, unsigned char src1) { vortex_routes(vortex, en, channel, ADB_DMA(adbdma), ADB_SRCIN(src0), ADB_SRCIN(src1)); } // Connect two mix to AdbDma. static void vortex_connection_mix_mix_adbdma(vortex_t * vortex, int en, unsigned char ch, unsigned char mix0, unsigned char mix1, unsigned char adbdma) { ADBRamLink routes[2]; routes[0] = (((mix0 + OFFSET_MIXOUT) & ADB_MASK) << ADB_SHIFT) | (adbdma & ADB_MASK); routes[1] = (((mix1 + OFFSET_MIXOUT) & ADB_MASK) << ADB_SHIFT) | ((adbdma + 0x20) & ADB_MASK); if (en) { vortex_adb_addroutes(vortex, ch, routes, 0x2); vortex_mixer_addWTD(vortex, mix0, ch); vortex_mixer_addWTD(vortex, mix1, ch); } else { vortex_adb_delroutes(vortex, ch, routes[0], routes[1]); vortex_mixer_delWTD(vortex, mix0, ch); vortex_mixer_delWTD(vortex, mix1, ch); } } #endif /* CODEC connect. */ static void vortex_connect_codecplay(vortex_t * vortex, int en, unsigned char mixers[]) { #ifdef CHIP_AU8820 vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_CODECOUT(0)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_CODECOUT(1)); #else #if 1 // Connect front channels through EQ. vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_EQIN(0)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_EQIN(1)); /* Lower volume, since EQ has some gain. */ vortex_mix_setvolumebyte(vortex, mixers[0], 0); vortex_mix_setvolumebyte(vortex, mixers[1], 0); vortex_route(vortex, en, 0x11, ADB_EQOUT(0), ADB_CODECOUT(0)); vortex_route(vortex, en, 0x11, ADB_EQOUT(1), ADB_CODECOUT(1)); /* Check if reg 0x28 has SDAC bit set. */ if (VORTEX_IS_QUAD(vortex)) { /* Rear channel. Note: ADB_CODECOUT(0+2) and (1+2) is for AC97 modem */ vortex_connection_mix_adb(vortex, en, 0x11, mixers[2], ADB_CODECOUT(0 + 4)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[3], ADB_CODECOUT(1 + 4)); /* printk(KERN_DEBUG "SDAC detected "); */ } #else // Use plain direct output to codec. vortex_connection_mix_adb(vortex, en, 0x11, mixers[0], ADB_CODECOUT(0)); vortex_connection_mix_adb(vortex, en, 0x11, mixers[1], ADB_CODECOUT(1)); #endif #endif } static void vortex_connect_codecrec(vortex_t * vortex, int en, unsigned char mixin0, unsigned char mixin1) { /* Enable: 0x1, 0x1 Channel: 0x11, 0x11 ADB Source address: 0x48, 0x49 Destination Asp4Topology_0x9c,0x98 */ vortex_connection_adb_mixin(vortex, en, 0x11, ADB_CODECIN(0), mixin0); vortex_connection_adb_mixin(vortex, en, 0x11, ADB_CODECIN(1), mixin1); } // Higher level ADB audio path (de)allocator. /* Resource manager */ static int resnum[VORTEX_RESOURCE_LAST] = { NR_ADB, NR_SRC, NR_MIXIN, NR_MIXOUT, NR_A3D }; /* Checkout/Checkin resource of given type. resmap: resource map to be used. If NULL means that we want to allocate a DMA resource (root of all other resources of a dma channel). out: Mean checkout if != 0. Else mean Checkin resource. restype: Indicates type of resource to be checked in or out. */ static char vortex_adb_checkinout(vortex_t * vortex, int resmap[], int out, int restype) { int i, qty = resnum[restype], resinuse = 0; if (out) { /* Gather used resources by all streams. */ for (i = 0; i < NR_ADB; i++) { resinuse |= vortex->dma_adb[i].resources[restype]; } resinuse |= vortex->fixed_res[restype]; /* Find and take free resource. 
*/ for (i = 0; i < qty; i++) { if ((resinuse & (1 << i)) == 0) { if (resmap != NULL) resmap[restype] |= (1 << i); else vortex->dma_adb[i].resources[restype] |= (1 << i); /* printk(KERN_DEBUG "vortex: ResManager: type %d out %d\n", restype, i); */ return i; } } } else { if (resmap == NULL) return -EINVAL; /* Checkin first resource of type restype. */ for (i = 0; i < qty; i++) { if (resmap[restype] & (1 << i)) { resmap[restype] &= ~(1 << i); /* printk(KERN_DEBUG "vortex: ResManager: type %d in %d\n", restype, i); */ return i; } } } printk(KERN_ERR "vortex: FATAL: ResManager: resource type %d exhausted.\n", restype); return -ENOMEM; } /* Default Connections */ static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch, int dir, int type); static void vortex_connect_default(vortex_t * vortex, int en) { // Connect AC97 codec. vortex->mixplayb[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex->mixplayb[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); if (VORTEX_IS_QUAD(vortex)) { vortex->mixplayb[2] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex->mixplayb[3] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); } vortex_connect_codecplay(vortex, en, vortex->mixplayb); vortex->mixcapt[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXIN); vortex->mixcapt[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXIN); vortex_connect_codecrec(vortex, en, MIX_CAPT(0), MIX_CAPT(1)); // Connect SPDIF #ifndef CHIP_AU8820 vortex->mixspdif[0] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex->mixspdif[1] = vortex_adb_checkinout(vortex, vortex->fixed_res, en, VORTEX_RESOURCE_MIXOUT); vortex_connection_mix_adb(vortex, en, 0x14, vortex->mixspdif[0], ADB_SPDIFOUT(0)); vortex_connection_mix_adb(vortex, en, 0x14, vortex->mixspdif[1], ADB_SPDIFOUT(1)); #endif // Connect WT #ifndef CHIP_AU8810 vortex_wt_connect(vortex, en); #endif // A3D (crosstalk canceler and A3D slices). AU8810 disabled for now. #ifndef CHIP_AU8820 vortex_Vort3D_connect(vortex, en); #endif // Connect I2S // Connect DSP interface for SQ3500 turbo (not here i think...) // Connect AC98 modem codec } /* Allocate nr_ch pcm audio routes if dma < 0. If dma >= 0, existing routes are deallocated. dma: DMA engine routes to be deallocated when dma >= 0. nr_ch: Number of channels to be de/allocated. dir: direction of stream. Uses same values as substream->stream. type: Type of audio output/source (codec, spdif, i2s, dsp, etc) Return: Return allocated DMA or same DMA passed as "dma" when dma >= 0. */ static int vortex_adb_allocroute(vortex_t * vortex, int dma, int nr_ch, int dir, int type) { stream_t *stream; int i, en; if ((nr_ch == 3) || ((dir == SNDRV_PCM_STREAM_CAPTURE) && (nr_ch > 2))) return -EBUSY; if (dma >= 0) { en = 0; vortex_adb_checkinout(vortex, vortex->dma_adb[dma].resources, en, VORTEX_RESOURCE_DMA); } else { en = 1; if ((dma = vortex_adb_checkinout(vortex, NULL, en, VORTEX_RESOURCE_DMA)) < 0) return -EBUSY; } stream = &vortex->dma_adb[dma]; stream->dma = dma; stream->dir = dir; stream->type = type; /* PLAYBACK ROUTES. */ if (dir == SNDRV_PCM_STREAM_PLAYBACK) { int src[4], mix[4], ch_top; #ifndef CHIP_AU8820 int a3d = 0; #endif /* Get SRC and MIXER hardware resources. 
*/ if (stream->type != VORTEX_PCM_SPDIF) { for (i = 0; i < nr_ch; i++) { if ((src[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_SRC)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } if (stream->type != VORTEX_PCM_A3D) { if ((mix[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_MIXIN)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } } } } #ifndef CHIP_AU8820 if (stream->type == VORTEX_PCM_A3D) { if ((a3d = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_A3D)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); printk(KERN_ERR "vortex: out of A3D sources. Sorry\n"); return -EBUSY; } /* (De)Initialize A3D hardware source. */ vortex_Vort3D_InitializeSource(&(vortex->a3d[a3d]), en); } /* Make SPDIF out exclusive to "spdif" device when in use. */ if ((stream->type == VORTEX_PCM_SPDIF) && (en)) { vortex_route(vortex, 0, 0x14, ADB_MIXOUT(vortex->mixspdif[0]), ADB_SPDIFOUT(0)); vortex_route(vortex, 0, 0x14, ADB_MIXOUT(vortex->mixspdif[1]), ADB_SPDIFOUT(1)); } #endif /* Make playback routes. */ for (i = 0; i < nr_ch; i++) { if (stream->type == VORTEX_PCM_ADB) { vortex_connection_adbdma_src(vortex, en, src[nr_ch - 1], dma, src[i]); vortex_connection_src_mixin(vortex, en, 0x11, src[i], mix[i]); vortex_connection_mixin_mix(vortex, en, mix[i], MIX_PLAYB(i), 0); #ifndef CHIP_AU8820 vortex_connection_mixin_mix(vortex, en, mix[i], MIX_SPDIF(i % 2), 0); vortex_mix_setinputvolumebyte(vortex, MIX_SPDIF(i % 2), mix[i], MIX_DEFIGAIN); #endif } #ifndef CHIP_AU8820 if (stream->type == VORTEX_PCM_A3D) { vortex_connection_adbdma_src(vortex, en, src[nr_ch - 1], dma, src[i]); vortex_route(vortex, en, 0x11, ADB_SRCOUT(src[i]), ADB_A3DIN(a3d)); /* XTalk test. */ //vortex_route(vortex, en, 0x11, dma, ADB_XTALKIN(i?9:4)); //vortex_route(vortex, en, 0x11, ADB_SRCOUT(src[i]), ADB_XTALKIN(i?4:9)); } if (stream->type == VORTEX_PCM_SPDIF) vortex_route(vortex, en, 0x14, ADB_DMA(stream->dma), ADB_SPDIFOUT(i)); #endif } if (stream->type != VORTEX_PCM_SPDIF && stream->type != VORTEX_PCM_A3D) { ch_top = (VORTEX_IS_QUAD(vortex) ? 4 : 2); for (i = nr_ch; i < ch_top; i++) { vortex_connection_mixin_mix(vortex, en, mix[i % nr_ch], MIX_PLAYB(i), 0); #ifndef CHIP_AU8820 vortex_connection_mixin_mix(vortex, en, mix[i % nr_ch], MIX_SPDIF(i % 2), 0); vortex_mix_setinputvolumebyte(vortex, MIX_SPDIF(i % 2), mix[i % nr_ch], MIX_DEFIGAIN); #endif } } #ifndef CHIP_AU8820 else { if (nr_ch == 1 && stream->type == VORTEX_PCM_SPDIF) vortex_route(vortex, en, 0x14, ADB_DMA(stream->dma), ADB_SPDIFOUT(1)); } /* Reconnect SPDIF out when "spdif" device is down. */ if ((stream->type == VORTEX_PCM_SPDIF) && (!en)) { vortex_route(vortex, 1, 0x14, ADB_MIXOUT(vortex->mixspdif[0]), ADB_SPDIFOUT(0)); vortex_route(vortex, 1, 0x14, ADB_MIXOUT(vortex->mixspdif[1]), ADB_SPDIFOUT(1)); } #endif /* CAPTURE ROUTES. */ } else { int src[2], mix[2]; /* Get SRC and MIXER hardware resources. */ for (i = 0; i < nr_ch; i++) { if ((mix[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_MIXOUT)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } if ((src[i] = vortex_adb_checkinout(vortex, stream->resources, en, VORTEX_RESOURCE_SRC)) < 0) { memset(stream->resources, 0, sizeof(unsigned char) * VORTEX_RESOURCE_LAST); return -EBUSY; } } /* Make capture routes. 
*/ vortex_connection_mixin_mix(vortex, en, MIX_CAPT(0), mix[0], 0); vortex_connection_mix_src(vortex, en, 0x11, mix[0], src[0]); if (nr_ch == 1) { vortex_connection_mixin_mix(vortex, en, MIX_CAPT(1), mix[0], 0); vortex_connection_src_adbdma(vortex, en, src[0], src[0], dma); } else { vortex_connection_mixin_mix(vortex, en, MIX_CAPT(1), mix[1], 0); vortex_connection_mix_src(vortex, en, 0x11, mix[1], src[1]); vortex_connection_src_src_adbdma(vortex, en, src[1], src[0], src[1], dma); } } vortex->dma_adb[dma].nr_ch = nr_ch; #if 0 /* AC97 Codec channel setup. FIXME: this has no effect on some cards !! */ if (nr_ch < 4) { /* Copy stereo to rear channel (surround) */ snd_ac97_write_cache(vortex->codec, AC97_SIGMATEL_DAC2INVERT, snd_ac97_read(vortex->codec, AC97_SIGMATEL_DAC2INVERT) | 4); } else { /* Allow separate front and rear channels. */ snd_ac97_write_cache(vortex->codec, AC97_SIGMATEL_DAC2INVERT, snd_ac97_read(vortex->codec, AC97_SIGMATEL_DAC2INVERT) & ~((u32) 4)); } #endif return dma; } /* Set the SampleRate of the SRC's attached to the given DMA engine. */ static void vortex_adb_setsrc(vortex_t * vortex, int adbdma, unsigned int rate, int dir) { stream_t *stream = &(vortex->dma_adb[adbdma]); int i, cvrt; /* dir=1:play ; dir=0:rec */ if (dir) cvrt = SRC_RATIO(rate, 48000); else cvrt = SRC_RATIO(48000, rate); /* Setup SRC's */ for (i = 0; i < NR_SRC; i++) { if (stream->resources[VORTEX_RESOURCE_SRC] & (1 << i)) vortex_src_setupchannel(vortex, i, cvrt, 0, 0, i, dir, 1, cvrt, dir); } } // Timer and ISR functions. static void vortex_settimer(vortex_t * vortex, int period) { //set the timer period to <period> 48000ths of a second. hwwrite(vortex->mmio, VORTEX_IRQ_STAT, period); } #if 0 static void vortex_enable_timer_int(vortex_t * card) { hwwrite(card->mmio, VORTEX_IRQ_CTRL, hwread(card->mmio, VORTEX_IRQ_CTRL) | IRQ_TIMER | 0x60); } static void vortex_disable_timer_int(vortex_t * card) { hwwrite(card->mmio, VORTEX_IRQ_CTRL, hwread(card->mmio, VORTEX_IRQ_CTRL) & ~IRQ_TIMER); } #endif static void vortex_enable_int(vortex_t * card) { // CAsp4ISR__EnableVortexInt_void_ hwwrite(card->mmio, VORTEX_CTRL, hwread(card->mmio, VORTEX_CTRL) | CTRL_IRQ_ENABLE); hwwrite(card->mmio, VORTEX_IRQ_CTRL, (hwread(card->mmio, VORTEX_IRQ_CTRL) & 0xffffefc0) | 0x24); } static void vortex_disable_int(vortex_t * card) { hwwrite(card->mmio, VORTEX_CTRL, hwread(card->mmio, VORTEX_CTRL) & ~CTRL_IRQ_ENABLE); } static irqreturn_t vortex_interrupt(int irq, void *dev_id) { vortex_t *vortex = dev_id; int i, handled; u32 source; //check if the interrupt is ours. if (!(hwread(vortex->mmio, VORTEX_STAT) & 0x1)) return IRQ_NONE; // This is the Interrupt Enable flag we set before (consistency check). if (!(hwread(vortex->mmio, VORTEX_CTRL) & CTRL_IRQ_ENABLE)) return IRQ_NONE; source = hwread(vortex->mmio, VORTEX_IRQ_SOURCE); // Reset IRQ flags. hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, source); hwread(vortex->mmio, VORTEX_IRQ_SOURCE); // Is at least one IRQ flag set? if (source == 0) { printk(KERN_ERR "vortex: missing irq source\n"); return IRQ_NONE; } handled = 0; // Attend every interrupt source. 
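	/* The dispatch below handles, in order: hardware error flags under
	 * IRQ_ERR_MASK (fatal, parity, register, fifo and dma errors),
	 * IRQ_PCMOUT period completions (which call snd_pcm_period_elapsed()
	 * for ADB and, on non-AU8810 chips, WT streams), the IRQ_TIMER tick
	 * (acknowledged by reading VORTEX_IRQ_STAT) and IRQ_MIDI (forwarded
	 * to the MPU401 helper); anything unrecognized is only logged. */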
if (unlikely(source & IRQ_ERR_MASK)) { if (source & IRQ_FATAL) { printk(KERN_ERR "vortex: IRQ fatal error\n"); } if (source & IRQ_PARITY) { printk(KERN_ERR "vortex: IRQ parity error\n"); } if (source & IRQ_REG) { printk(KERN_ERR "vortex: IRQ reg error\n"); } if (source & IRQ_FIFO) { printk(KERN_ERR "vortex: IRQ fifo error\n"); } if (source & IRQ_DMA) { printk(KERN_ERR "vortex: IRQ dma error\n"); } handled = 1; } if (source & IRQ_PCMOUT) { /* ALSA period acknowledge. */ spin_lock(&vortex->lock); for (i = 0; i < NR_ADB; i++) { if (vortex->dma_adb[i].fifo_status == FIFO_START) { if (!vortex_adbdma_bufshift(vortex, i)) continue; spin_unlock(&vortex->lock); snd_pcm_period_elapsed(vortex->dma_adb[i]. substream); spin_lock(&vortex->lock); } } #ifndef CHIP_AU8810 for (i = 0; i < NR_WT; i++) { if (vortex->dma_wt[i].fifo_status == FIFO_START) { if (vortex_wtdma_bufshift(vortex, i)) ; spin_unlock(&vortex->lock); snd_pcm_period_elapsed(vortex->dma_wt[i]. substream); spin_lock(&vortex->lock); } } #endif spin_unlock(&vortex->lock); handled = 1; } //Acknowledge the Timer interrupt if (source & IRQ_TIMER) { hwread(vortex->mmio, VORTEX_IRQ_STAT); handled = 1; } if (source & IRQ_MIDI) { snd_mpu401_uart_interrupt(vortex->irq, vortex->rmidi->private_data); handled = 1; } if (!handled) { printk(KERN_ERR "vortex: unknown irq source %x\n", source); } return IRQ_RETVAL(handled); } /* Codec */ #define POLL_COUNT 1000 static void vortex_codec_init(vortex_t * vortex) { int i; for (i = 0; i < 32; i++) { /* the windows driver writes -i, so we write -i */ hwwrite(vortex->mmio, (VORTEX_CODEC_CHN + (i << 2)), -i); msleep(2); } if (0) { hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x8068); msleep(1); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00e8); msleep(1); } else { hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80e8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x80a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00a8); msleep(2); hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0x00e8); } for (i = 0; i < 32; i++) { hwwrite(vortex->mmio, (VORTEX_CODEC_CHN + (i << 2)), -i); msleep(5); } hwwrite(vortex->mmio, VORTEX_CODEC_CTRL, 0xe8); msleep(1); /* Enable codec channels 0 and 1. */ hwwrite(vortex->mmio, VORTEX_CODEC_EN, hwread(vortex->mmio, VORTEX_CODEC_EN) | EN_CODEC); } static void vortex_codec_write(struct snd_ac97 * codec, unsigned short addr, unsigned short data) { vortex_t *card = (vortex_t *) codec->private_data; unsigned int lifeboat = 0; /* wait for transactions to clear */ while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) { udelay(100); if (lifeboat++ > POLL_COUNT) { printk(KERN_ERR "vortex: ac97 codec stuck busy\n"); return; } } /* write register */ hwwrite(card->mmio, VORTEX_CODEC_IO, ((addr << VORTEX_CODEC_ADDSHIFT) & VORTEX_CODEC_ADDMASK) | ((data << VORTEX_CODEC_DATSHIFT) & VORTEX_CODEC_DATMASK) | VORTEX_CODEC_WRITE | (codec->num << VORTEX_CODEC_ID_SHIFT) ); /* Flush Caches. 
*/ hwread(card->mmio, VORTEX_CODEC_IO); } static unsigned short vortex_codec_read(struct snd_ac97 * codec, unsigned short addr) { vortex_t *card = (vortex_t *) codec->private_data; u32 read_addr, data; unsigned lifeboat = 0; /* wait for transactions to clear */ while (!(hwread(card->mmio, VORTEX_CODEC_CTRL) & 0x100)) { udelay(100); if (lifeboat++ > POLL_COUNT) { printk(KERN_ERR "vortex: ac97 codec stuck busy\n"); return 0xffff; } } /* set up read address */ read_addr = ((addr << VORTEX_CODEC_ADDSHIFT) & VORTEX_CODEC_ADDMASK) | (codec->num << VORTEX_CODEC_ID_SHIFT) ; hwwrite(card->mmio, VORTEX_CODEC_IO, read_addr); /* wait for address */ do { udelay(100); data = hwread(card->mmio, VORTEX_CODEC_IO); if (lifeboat++ > POLL_COUNT) { printk(KERN_ERR "vortex: ac97 address never arrived\n"); return 0xffff; } } while ((data & VORTEX_CODEC_ADDMASK) != (addr << VORTEX_CODEC_ADDSHIFT)); /* return data. */ return (u16) (data & VORTEX_CODEC_DATMASK); } /* SPDIF support */ static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode) { int i, this_38 = 0, this_04 = 0, this_08 = 0, this_0c = 0; /* CAsp4Spdif::InitializeSpdifHardware(void) */ hwwrite(vortex->mmio, VORTEX_SPDIF_FLAGS, hwread(vortex->mmio, VORTEX_SPDIF_FLAGS) & 0xfff3fffd); //for (i=0x291D4; i<0x29200; i+=4) for (i = 0; i < 11; i++) hwwrite(vortex->mmio, VORTEX_SPDIF_CFG1 + (i << 2), 0); //hwwrite(vortex->mmio, 0x29190, hwread(vortex->mmio, 0x29190) | 0xc0000); hwwrite(vortex->mmio, VORTEX_CODEC_EN, hwread(vortex->mmio, VORTEX_CODEC_EN) | EN_SPDIF); /* CAsp4Spdif::ProgramSRCInHardware(enum SPDIF_SR,enum SPDIFMODE) */ if (this_04 && this_08) { int edi; i = (((0x5DC00000 / spdif_sr) + 1) >> 1); if (i > 0x800) { if (i < 0x1ffff) edi = (i >> 1); else edi = 0x1ffff; } else { i = edi = 0x800; } /* this_04 and this_08 are the CASp4Src's (samplerate converters) */ vortex_src_setupchannel(vortex, this_04, edi, 0, 1, this_0c, 1, 0, edi, 1); vortex_src_setupchannel(vortex, this_08, edi, 0, 1, this_0c, 1, 0, edi, 1); } i = spdif_sr; spdif_sr |= 0x8c; switch (i) { case 32000: this_38 &= 0xFFFFFFFE; this_38 &= 0xFFFFFFFD; this_38 &= 0xF3FFFFFF; this_38 |= 0x03000000; /* set 32khz samplerate */ this_38 &= 0xFFFFFF3F; spdif_sr &= 0xFFFFFFFD; spdif_sr |= 1; break; case 44100: this_38 &= 0xFFFFFFFE; this_38 &= 0xFFFFFFFD; this_38 &= 0xF0FFFFFF; this_38 |= 0x03000000; this_38 &= 0xFFFFFF3F; spdif_sr &= 0xFFFFFFFC; break; case 48000: if (spdif_mode == 1) { this_38 &= 0xFFFFFFFE; this_38 &= 0xFFFFFFFD; this_38 &= 0xF2FFFFFF; this_38 |= 0x02000000; /* set 48khz samplerate */ this_38 &= 0xFFFFFF3F; } else { /* J. Gordon Wolfe: I think this stuff is for AC3 */ this_38 |= 0x00000003; this_38 &= 0xFFFFFFBF; this_38 |= 0x80; } spdif_sr |= 2; spdif_sr &= 0xFFFFFFFE; break; } /* looks like the next 2 lines transfer a 16-bit value into 2 8-bit registers. seems to be for the standard IEC/SPDIF initialization stuff */ hwwrite(vortex->mmio, VORTEX_SPDIF_CFG0, this_38 & 0xffff); hwwrite(vortex->mmio, VORTEX_SPDIF_CFG1, this_38 >> 0x10); hwwrite(vortex->mmio, VORTEX_SPDIF_SMPRATE, spdif_sr); } /* Initialization */ static int __devinit vortex_core_init(vortex_t * vortex) { printk(KERN_INFO "Vortex: init.... "); /* Hardware Init. 
*/ hwwrite(vortex->mmio, VORTEX_CTRL, 0xffffffff); msleep(5); hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) & 0xffdfffff); msleep(5); /* Reset IRQ flags */ hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffffffff); hwread(vortex->mmio, VORTEX_IRQ_STAT); vortex_codec_init(vortex); #ifdef CHIP_AU8830 hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) | 0x1000000); #endif /* Init audio engine. */ vortex_adbdma_init(vortex); hwwrite(vortex->mmio, VORTEX_ENGINE_CTRL, 0x0); //, 0xc83c7e58, 0xc5f93e58 vortex_adb_init(vortex); /* Init processing blocks. */ vortex_fifo_init(vortex); vortex_mixer_init(vortex); vortex_srcblock_init(vortex); #ifndef CHIP_AU8820 vortex_eq_init(vortex); vortex_spdif_init(vortex, 48000, 1); vortex_Vort3D_enable(vortex); #endif #ifndef CHIP_AU8810 vortex_wt_init(vortex); #endif // Moved to au88x0.c //vortex_connect_default(vortex, 1); vortex_settimer(vortex, 0x90); // Enable Interrupts. // vortex_enable_int() must be first !! // hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, 0); // vortex_enable_int(vortex); //vortex_enable_timer_int(vortex); //vortex_disable_timer_int(vortex); printk(KERN_INFO "done.\n"); spin_lock_init(&vortex->lock); return 0; } static int vortex_core_shutdown(vortex_t * vortex) { printk(KERN_INFO "Vortex: shutdown..."); #ifndef CHIP_AU8820 vortex_eq_free(vortex); vortex_Vort3D_disable(vortex); #endif //vortex_disable_timer_int(vortex); vortex_disable_int(vortex); vortex_connect_default(vortex, 0); /* Reset all DMA fifos. */ vortex_fifo_init(vortex); /* Erase all audio routes. */ vortex_adb_init(vortex); /* Disable MPU401 */ //hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, hwread(vortex->mmio, VORTEX_IRQ_CTRL) & ~IRQ_MIDI); //hwwrite(vortex->mmio, VORTEX_CTRL, hwread(vortex->mmio, VORTEX_CTRL) & ~CTRL_MIDI_EN); hwwrite(vortex->mmio, VORTEX_IRQ_CTRL, 0); hwwrite(vortex->mmio, VORTEX_CTRL, 0); msleep(5); hwwrite(vortex->mmio, VORTEX_IRQ_SOURCE, 0xffff); printk(KERN_INFO "done.\n"); return 0; } /* Alsa support. */ static int vortex_alsafmt_aspfmt(int alsafmt) { int fmt; switch (alsafmt) { case SNDRV_PCM_FORMAT_U8: fmt = 0x1; break; case SNDRV_PCM_FORMAT_MU_LAW: fmt = 0x2; break; case SNDRV_PCM_FORMAT_A_LAW: fmt = 0x3; break; case SNDRV_PCM_FORMAT_SPECIAL: fmt = 0x4; /* guess. */ break; case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: fmt = 0x5; /* guess. */ break; case SNDRV_PCM_FORMAT_S16_LE: fmt = 0x8; break; case SNDRV_PCM_FORMAT_S16_BE: fmt = 0x9; /* check this... */ break; default: fmt = 0x8; printk(KERN_ERR "vortex: format unsupported %d\n", alsafmt); break; } return fmt; } /* Some not yet useful translations. */ #if 0 typedef enum { ASPFMTLINEAR16 = 0, /* 0x8 */ ASPFMTLINEAR8, /* 0x1 */ ASPFMTULAW, /* 0x2 */ ASPFMTALAW, /* 0x3 */ ASPFMTSPORT, /* ? */ ASPFMTSPDIF, /* ? */ } ASPENCODING; static int vortex_translateformat(vortex_t * vortex, char bits, char nch, int encod) { int a, this_194; if ((bits != 8) && (bits != 16)) return -1; switch (encod) { case 0: if (bits == 0x10) a = 8; // 16 bit break; case 1: if (bits == 8) a = 1; // 8 bit break; case 2: a = 2; // U_LAW break; case 3: a = 3; // A_LAW break; } switch (nch) { case 1: this_194 = 0; break; case 2: this_194 = 1; break; case 4: this_194 = 1; break; case 6: this_194 = 1; break; } return (a); } static void vortex_cdmacore_setformat(vortex_t * vortex, int bits, int nch) { short int d, this_148; d = ((bits >> 3) * nch); this_148 = 0xbb80 / d; } #endif
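/* Illustrative sketch, not from the original driver: vortex_adb_checkinout()
 * above treats each resources[] entry as a bitmask of owned units per
 * resource type, ORing the masks of every stream to find a free unit. The
 * hypothetical helper below (our naming) isolates that free-bit search so
 * the bookkeeping is easier to follow; it is kept under #if 0 like the other
 * reference-only code in this file. */
#if 0
static int vortex_example_find_free(unsigned int resinuse, int qty)
{
	int i;

	/* same scan as the checkout path: the first clear bit wins */
	for (i = 0; i < qty; i++)
		if ((resinuse & (1 << i)) == 0)
			return i;
	return -ENOMEM;		/* all qty units taken, as the driver reports */
}
#endif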
gpl-2.0
baberthal/linux
drivers/misc/cs5535-mfgpt.c
1545
10440
/* * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT) * * Copyright (C) 2006, Advanced Micro Devices, Inc. * Copyright (C) 2007 Andres Salomon <dilinger@debian.org> * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book. */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/cs5535.h> #include <linux/slab.h> #define DRV_NAME "cs5535-mfgpt" static int mfgpt_reset_timers; module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644); MODULE_PARM_DESC(mfgptfix, "Try to reset the MFGPT timers during init; " "required by some broken BIOSes (ie, TinyBIOS < 0.99) or kexec " "(1 = reset the MFGPT using an undocumented bit, " "2 = perform a soft reset by unconfiguring all timers); " "use what works best for you."); struct cs5535_mfgpt_timer { struct cs5535_mfgpt_chip *chip; int nr; }; static struct cs5535_mfgpt_chip { DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS); resource_size_t base; struct platform_device *pdev; spinlock_t lock; int initialized; } cs5535_mfgpt_chip; int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, int event, int enable) { uint32_t msr, mask, value, dummy; int shift = (cmp == MFGPT_CMP1) ? 0 : 8; if (!timer) { WARN_ON(1); return -EIO; } /* * The register maps for these are described in sections 6.17.1.x of * the AMD Geode CS5536 Companion Device Data Book. */ switch (event) { case MFGPT_EVENT_RESET: /* * XXX: According to the docs, we cannot reset timers above * 6; that is, resets for 7 and 8 will be ignored. Is this * a problem? -dilinger */ msr = MSR_MFGPT_NR; mask = 1 << (timer->nr + 24); break; case MFGPT_EVENT_NMI: msr = MSR_MFGPT_NR; mask = 1 << (timer->nr + shift); break; case MFGPT_EVENT_IRQ: msr = MSR_MFGPT_IRQ; mask = 1 << (timer->nr + shift); break; default: return -EIO; } rdmsr(msr, value, dummy); if (enable) value |= mask; else value &= ~mask; wrmsr(msr, value, dummy); return 0; } EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event); int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq, int enable) { uint32_t zsel, lpc, dummy; int shift; if (!timer) { WARN_ON(1); return -EIO; } /* * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA * is using the same CMP of the timer's Siamese twin, the IRQ is set to * 2, and we mustn't use nor change it. * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the * IRQ of the 1st. This can only happen if forcing an IRQ, calling this * with *irq==0 is safe. Currently there _are_ no 2 drivers. */ rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); shift = ((cmp == MFGPT_CMP1 ? 
0 : 4) + timer->nr % 4) * 4; if (((zsel >> shift) & 0xF) == 2) return -EIO; /* Choose IRQ: if none supplied, keep IRQ already set or use default */ if (!*irq) *irq = (zsel >> shift) & 0xF; if (!*irq) *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ; /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ if (*irq < 1 || *irq == 2 || *irq > 15) return -EIO; rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); if (lpc & (1 << *irq)) return -EIO; /* All chosen and checked - go for it */ if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) return -EIO; if (enable) { zsel = (zsel & ~(0xF << shift)) | (*irq << shift); wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); } return 0; } EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq); struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain) { struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip; struct cs5535_mfgpt_timer *timer = NULL; unsigned long flags; int max; if (!mfgpt->initialized) goto done; /* only allocate timers from the working domain if requested */ if (domain == MFGPT_DOMAIN_WORKING) max = 6; else max = MFGPT_MAX_TIMERS; if (timer_nr >= max) { /* programmer error. silly programmers! */ WARN_ON(1); goto done; } spin_lock_irqsave(&mfgpt->lock, flags); if (timer_nr < 0) { unsigned long t; /* try to find any available timer */ t = find_first_bit(mfgpt->avail, max); /* set timer_nr to -1 if no timers available */ timer_nr = t < max ? (int) t : -1; } else { /* check if the requested timer's available */ if (!test_bit(timer_nr, mfgpt->avail)) timer_nr = -1; } if (timer_nr >= 0) /* if timer_nr is not -1, it's an available timer */ __clear_bit(timer_nr, mfgpt->avail); spin_unlock_irqrestore(&mfgpt->lock, flags); if (timer_nr < 0) goto done; timer = kmalloc(sizeof(*timer), GFP_KERNEL); if (!timer) { /* aw hell */ spin_lock_irqsave(&mfgpt->lock, flags); __set_bit(timer_nr, mfgpt->avail); spin_unlock_irqrestore(&mfgpt->lock, flags); goto done; } timer->chip = mfgpt; timer->nr = timer_nr; dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr); done: return timer; } EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer); /* * XXX: This frees the timer memory, but never resets the actual hardware * timer. The old geode_mfgpt code did this; it would be good to figure * out a way to actually release the hardware timer. See comments below. */ void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer) { unsigned long flags; uint16_t val; /* timer can be made available again only if never set up */ val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP); if (!(val & MFGPT_SETUP_SETUP)) { spin_lock_irqsave(&timer->chip->lock, flags); __set_bit(timer->nr, timer->chip->avail); spin_unlock_irqrestore(&timer->chip->lock, flags); } kfree(timer); } EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer); uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg) { return inw(timer->chip->base + reg + (timer->nr * 8)); } EXPORT_SYMBOL_GPL(cs5535_mfgpt_read); void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, uint16_t value) { outw(value, timer->chip->base + reg + (timer->nr * 8)); } EXPORT_SYMBOL_GPL(cs5535_mfgpt_write); /* * This is a sledgehammer that resets all MFGPT timers. This is required by * some broken BIOSes which leave the system in an unstable state * (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain as to * whether or not this secret MSR can be used to release individual timers. * Jordan tells me that he and Mitch once played w/ it, but it's unclear * what the results of that were (and they experienced some instability). 
*/ static void reset_all_timers(void) { uint32_t val, dummy; /* The following undocumented bit resets the MFGPT timers */ val = 0xFF; dummy = 0; wrmsr(MSR_MFGPT_SETUP, val, dummy); } /* * This is another sledgehammer to reset all MFGPT timers. * Instead of using the undocumented bit method it clears * IRQ, NMI and RESET settings. */ static void soft_reset(void) { int i; struct cs5535_mfgpt_timer t; for (i = 0; i < MFGPT_MAX_TIMERS; i++) { t.nr = i; cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_RESET, 0); cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_RESET, 0); cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_NMI, 0); cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_NMI, 0); cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_IRQ, 0); cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_IRQ, 0); } } /* * Check whether any MFGPTs are available for the kernel to use. In most * cases, firmware that uses AMD's VSA code will claim all timers during * bootup; we certainly don't want to take them if they're already in use. * In other cases (such as with VSAless OpenFirmware), the system firmware * leaves timers available for us to use. */ static int scan_timers(struct cs5535_mfgpt_chip *mfgpt) { struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; unsigned long flags; int timers = 0; uint16_t val; int i; /* bios workaround */ if (mfgpt_reset_timers == 1) reset_all_timers(); else if (mfgpt_reset_timers == 2) soft_reset(); /* just to be safe, protect this section w/ lock */ spin_lock_irqsave(&mfgpt->lock, flags); for (i = 0; i < MFGPT_MAX_TIMERS; i++) { timer.nr = i; val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP); if (!(val & MFGPT_SETUP_SETUP) || mfgpt_reset_timers == 2) { __set_bit(i, mfgpt->avail); timers++; } } spin_unlock_irqrestore(&mfgpt->lock, flags); return timers; } static int cs5535_mfgpt_probe(struct platform_device *pdev) { struct resource *res; int err = -EIO, t; if (mfgpt_reset_timers < 0 || mfgpt_reset_timers > 2) { dev_err(&pdev->dev, "Bad mfgpt_reset_timers value: %i\n", mfgpt_reset_timers); goto done; } /* There are two ways to get the MFGPT base address; one is by * fetching it from MSR_LBAR_MFGPT, the other is by reading the * PCI BAR info. The latter method is easier (especially across * different architectures), so we'll stick with that for now. If * it turns out to be unreliable in the face of crappy BIOSes, we * can always go back to using MSRs.. */ res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); goto done; } if (!request_region(res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "can't request region\n"); goto done; } /* set up the driver-specific struct */ cs5535_mfgpt_chip.base = res->start; cs5535_mfgpt_chip.pdev = pdev; spin_lock_init(&cs5535_mfgpt_chip.lock); dev_info(&pdev->dev, "reserved resource region %pR\n", res); /* detect the available timers */ t = scan_timers(&cs5535_mfgpt_chip); dev_info(&pdev->dev, "%d MFGPT timers available\n", t); cs5535_mfgpt_chip.initialized = 1; return 0; done: return err; } static struct platform_driver cs5535_mfgpt_driver = { .driver = { .name = DRV_NAME, }, .probe = cs5535_mfgpt_probe, }; static int __init cs5535_mfgpt_init(void) { return platform_driver_register(&cs5535_mfgpt_driver); } module_init(cs5535_mfgpt_init); MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME);
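/* Hypothetical usage sketch, not part of this file: how a client driver
 * (e.g. a clock event source) might consume the API exported above. The
 * example_ function name and the comparator value are illustrative only;
 * the MFGPT_* constants come from <linux/cs5535.h>, which this driver
 * already includes. */
#if 0
static int example_claim_timer(void)
{
	struct cs5535_mfgpt_timer *timer;
	int irq = 0;	/* 0 lets cs5535_mfgpt_set_irq() keep or pick an IRQ */

	/* take any free timer from the working domain (timers 0..5) */
	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (!timer)
		return -ENODEV;

	if (cs5535_mfgpt_set_irq(timer, MFGPT_CMP2, &irq, 1)) {
		cs5535_mfgpt_free_timer(timer);
		return -EIO;
	}

	/* program comparator 2 and restart the counter from zero */
	cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, 0xffff);
	cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
	return 0;
}
#endif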
gpl-2.0
jmztaylor/android_kernel_amazon_ariel
drivers/video/omap2/dss/hdmi_panel.c
2313
8695
/* * hdmi_panel.c * * HDMI library support functions for TI OMAP4 processors. * * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/ * Authors: Mythri P k <mythripk@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/module.h> #include <video/omapdss.h> #include <linux/slab.h> #include "dss.h" static struct { /* This protects the panel ops, mainly when accessing the HDMI IP. */ struct mutex lock; #if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) /* This protects the audio ops, specifically. */ spinlock_t audio_lock; #endif } hdmi; static int hdmi_panel_probe(struct omap_dss_device *dssdev) { /* Initialize default timings to VGA in DVI mode */ const struct omap_video_timings default_timings = { .x_res = 640, .y_res = 480, .pixel_clock = 25175, .hsw = 96, .hfp = 16, .hbp = 48, .vsw = 2, .vfp = 11, .vbp = 31, .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, .interlace = false, }; DSSDBG("ENTER hdmi_panel_probe\n"); dssdev->panel.timings = default_timings; DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n", dssdev->panel.timings.x_res, dssdev->panel.timings.y_res); omapdss_hdmi_display_set_timing(dssdev, &dssdev->panel.timings); return 0; } static void hdmi_panel_remove(struct omap_dss_device *dssdev) { } #if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev) { unsigned long flags; int r; mutex_lock(&hdmi.lock); spin_lock_irqsave(&hdmi.audio_lock, flags); /* enable audio only if the display is active and supports audio */ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !hdmi_mode_has_audio()) { DSSERR("audio not supported or display is off\n"); r = -EPERM; goto err; } r = hdmi_audio_enable(); if (!r) dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED; err: spin_unlock_irqrestore(&hdmi.audio_lock, flags); mutex_unlock(&hdmi.lock); return r; } static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev) { unsigned long flags; spin_lock_irqsave(&hdmi.audio_lock, flags); hdmi_audio_disable(); dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED; spin_unlock_irqrestore(&hdmi.audio_lock, flags); } static int hdmi_panel_audio_start(struct omap_dss_device *dssdev) { unsigned long flags; int r; spin_lock_irqsave(&hdmi.audio_lock, flags); /* * No need to check the panel state. It was checked when trasitioning * to AUDIO_ENABLED. 
*/ if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) { DSSERR("audio start from invalid state\n"); r = -EPERM; goto err; } r = hdmi_audio_start(); if (!r) dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING; err: spin_unlock_irqrestore(&hdmi.audio_lock, flags); return r; } static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev) { unsigned long flags; spin_lock_irqsave(&hdmi.audio_lock, flags); hdmi_audio_stop(); dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED; spin_unlock_irqrestore(&hdmi.audio_lock, flags); } static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev) { bool r = false; mutex_lock(&hdmi.lock); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) goto err; if (!hdmi_mode_has_audio()) goto err; r = true; err: mutex_unlock(&hdmi.lock); return r; } static int hdmi_panel_audio_config(struct omap_dss_device *dssdev, struct omap_dss_audio *audio) { unsigned long flags; int r; mutex_lock(&hdmi.lock); spin_lock_irqsave(&hdmi.audio_lock, flags); /* config audio only if the display is active and supports audio */ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !hdmi_mode_has_audio()) { DSSERR("audio not supported or display is off\n"); r = -EPERM; goto err; } r = hdmi_audio_config(audio); if (!r) dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED; err: spin_unlock_irqrestore(&hdmi.audio_lock, flags); mutex_unlock(&hdmi.lock); return r; } #else static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev) { return -EPERM; } static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev) { } static int hdmi_panel_audio_start(struct omap_dss_device *dssdev) { return -EPERM; } static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev) { } static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev) { return false; } static int hdmi_panel_audio_config(struct omap_dss_device *dssdev, struct omap_dss_audio *audio) { return -EPERM; } #endif static int hdmi_panel_enable(struct omap_dss_device *dssdev) { int r = 0; DSSDBG("ENTER hdmi_panel_enable\n"); mutex_lock(&hdmi.lock); if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err; } omapdss_hdmi_display_set_timing(dssdev, &dssdev->panel.timings); r = omapdss_hdmi_display_enable(dssdev); if (r) { DSSERR("failed to power on\n"); goto err; } dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; err: mutex_unlock(&hdmi.lock); return r; } static void hdmi_panel_disable(struct omap_dss_device *dssdev) { mutex_lock(&hdmi.lock); if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { /* * TODO: notify audio users that the display was disabled. For * now, disable audio locally to not break our audio state * machine. */ hdmi_panel_audio_disable(dssdev); omapdss_hdmi_display_disable(dssdev); } dssdev->state = OMAP_DSS_DISPLAY_DISABLED; mutex_unlock(&hdmi.lock); } static void hdmi_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { mutex_lock(&hdmi.lock); *timings = dssdev->panel.timings; mutex_unlock(&hdmi.lock); } static void hdmi_set_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { DSSDBG("hdmi_set_timings\n"); mutex_lock(&hdmi.lock); /* * TODO: notify audio users that there was a timings change. For * now, disable audio locally to not break our audio state machine. 
*/ hdmi_panel_audio_disable(dssdev); omapdss_hdmi_display_set_timing(dssdev, timings); dssdev->panel.timings = *timings; mutex_unlock(&hdmi.lock); } static int hdmi_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { int r = 0; DSSDBG("hdmi_check_timings\n"); mutex_lock(&hdmi.lock); r = omapdss_hdmi_display_check_timing(dssdev, timings); mutex_unlock(&hdmi.lock); return r; } static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len) { int r; bool need_enable; mutex_lock(&hdmi.lock); need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED; if (need_enable) { r = omapdss_hdmi_core_enable(dssdev); if (r) goto err; } r = omapdss_hdmi_read_edid(buf, len); if (need_enable) omapdss_hdmi_core_disable(dssdev); err: mutex_unlock(&hdmi.lock); return r; } static bool hdmi_detect(struct omap_dss_device *dssdev) { int r; bool need_enable; mutex_lock(&hdmi.lock); need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED; if (need_enable) { r = omapdss_hdmi_core_enable(dssdev); if (r) goto err; } r = omapdss_hdmi_detect(); if (need_enable) omapdss_hdmi_core_disable(dssdev); err: mutex_unlock(&hdmi.lock); return r; } static struct omap_dss_driver hdmi_driver = { .probe = hdmi_panel_probe, .remove = hdmi_panel_remove, .enable = hdmi_panel_enable, .disable = hdmi_panel_disable, .get_timings = hdmi_get_timings, .set_timings = hdmi_set_timings, .check_timings = hdmi_check_timings, .read_edid = hdmi_read_edid, .detect = hdmi_detect, .audio_enable = hdmi_panel_audio_enable, .audio_disable = hdmi_panel_audio_disable, .audio_start = hdmi_panel_audio_start, .audio_stop = hdmi_panel_audio_stop, .audio_supported = hdmi_panel_audio_supported, .audio_config = hdmi_panel_audio_config, .driver = { .name = "hdmi_panel", .owner = THIS_MODULE, }, }; int hdmi_panel_init(void) { mutex_init(&hdmi.lock); #if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO) spin_lock_init(&hdmi.audio_lock); #endif return omap_dss_register_driver(&hdmi_driver); } void hdmi_panel_exit(void) { omap_dss_unregister_driver(&hdmi_driver); }
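/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * The omapdss core is expected to call hdmi_panel_init()/hdmi_panel_exit()
 * from its own init/exit paths; the caller below is hypothetical and only
 * illustrates that wiring.
 */
#if 0	/* illustration only */
static int __init example_dss_init(void)
{
	/* registers the "hdmi_panel" omap_dss_driver defined above */
	return hdmi_panel_init();
}

static void __exit example_dss_exit(void)
{
	hdmi_panel_exit();	/* unregisters the driver again */
}
#endif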
gpl-2.0
Vegaviet-Dev/Kernel_N4_N910SLK
drivers/staging/media/go7007/go7007-loader.c
2313
3981
/* * Copyright (C) 2008 Sensoray Company Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/firmware.h> #include <cypress_firmware.h> struct fw_config { u16 vendor; u16 product; const char * const fw_name1; const char * const fw_name2; }; struct fw_config fw_configs[] = { { 0x1943, 0xa250, "go7007/s2250-1.fw", "go7007/s2250-2.fw" }, { 0x093b, 0xa002, "go7007/px-m402u.fw", NULL }, { 0x093b, 0xa004, "go7007/px-tv402u.fw", NULL }, { 0x0eb1, 0x6666, "go7007/lr192.fw", NULL }, { 0x0eb1, 0x6668, "go7007/wis-startrek.fw", NULL }, { 0, 0, NULL, NULL } }; MODULE_FIRMWARE("go7007/s2250-1.fw"); MODULE_FIRMWARE("go7007/s2250-2.fw"); MODULE_FIRMWARE("go7007/px-m402u.fw"); MODULE_FIRMWARE("go7007/px-tv402u.fw"); MODULE_FIRMWARE("go7007/lr192.fw"); MODULE_FIRMWARE("go7007/wis-startrek.fw"); static int go7007_loader_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev; const struct firmware *fw; u16 vendor, product; const char *fw1, *fw2; int ret; int i; usbdev = usb_get_dev(interface_to_usbdev(interface)); if (!usbdev) goto failed2; if (usbdev->descriptor.bNumConfigurations != 1) { dev_err(&interface->dev, "can't handle multiple config\n"); return -ENODEV; } vendor = le16_to_cpu(usbdev->descriptor.idVendor); product = le16_to_cpu(usbdev->descriptor.idProduct); for (i = 0; fw_configs[i].fw_name1; i++) if (fw_configs[i].vendor == vendor && fw_configs[i].product == product) break; /* Should never happen */ if (fw_configs[i].fw_name1 == NULL) goto failed2; fw1 = fw_configs[i].fw_name1; fw2 = fw_configs[i].fw_name2; dev_info(&interface->dev, "loading firmware %s\n", fw1); if (request_firmware(&fw, fw1, &usbdev->dev)) { dev_err(&interface->dev, "unable to load firmware from file \"%s\"\n", fw1); goto failed2; } ret = cypress_load_firmware(usbdev, fw, CYPRESS_FX2); release_firmware(fw); if (0 != ret) { dev_err(&interface->dev, "loader download failed\n"); goto failed2; } if (fw2 == NULL) return 0; if (request_firmware(&fw, fw2, &usbdev->dev)) { dev_err(&interface->dev, "unable to load firmware from file \"%s\"\n", fw2); goto failed2; } ret = cypress_load_firmware(usbdev, fw, CYPRESS_FX2); release_firmware(fw); if (0 != ret) { dev_err(&interface->dev, "firmware download failed\n"); goto failed2; } return 0; failed2: dev_err(&interface->dev, "probe failed\n"); return -ENODEV; } static void go7007_loader_disconnect(struct usb_interface *interface) { dev_info(&interface->dev, "disconnect\n"); usb_set_intfdata(interface, NULL); } static const struct usb_device_id go7007_loader_ids[] = { { USB_DEVICE(0x1943, 0xa250) }, { USB_DEVICE(0x093b, 0xa002) }, { USB_DEVICE(0x093b, 0xa004) }, { USB_DEVICE(0x0eb1, 0x6666) }, { USB_DEVICE(0x0eb1, 0x6668) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, go7007_loader_ids); static struct usb_driver go7007_loader_driver = { .name = 
"go7007-loader", .probe = go7007_loader_probe, .disconnect = go7007_loader_disconnect, .id_table = go7007_loader_ids, }; module_usb_driver(go7007_loader_driver); MODULE_AUTHOR(""); MODULE_DESCRIPTION("firmware loader for go7007-usb"); MODULE_LICENSE("GPL v2");
gpl-2.0
dev-harsh1998/android_kernel_cyanogen_msm8916
arch/x86/mm/highmem_32.c
2313
3555
#include <linux/highmem.h> #include <linux/module.h> #include <linux/swap.h> /* for totalram_pages */ #include <linux/bootmem.h> void *kmap(struct page *page) { might_sleep(); if (!PageHighMem(page)) return page_address(page); return kmap_high(page); } EXPORT_SYMBOL(kmap); void kunmap(struct page *page) { if (in_interrupt()) BUG(); if (!PageHighMem(page)) return; kunmap_high(page); } EXPORT_SYMBOL(kunmap); /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. * * However when holding an atomic kmap it is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. */ void *kmap_atomic_prot(struct page *page, pgprot_t prot) { unsigned long vaddr; int idx, type; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ pagefault_disable(); if (!PageHighMem(page)) return page_address(page); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); set_pte(kmap_pte-idx, mk_pte(page, prot)); arch_flush_lazy_mmu_mode(); return (void *)vaddr; } EXPORT_SYMBOL(kmap_atomic_prot); void *kmap_atomic(struct page *page) { return kmap_atomic_prot(page, kmap_prot); } EXPORT_SYMBOL(kmap_atomic); /* * This is the same as kmap_atomic() but can map memory that doesn't * have a struct page associated with it. */ void *kmap_atomic_pfn(unsigned long pfn) { return kmap_atomic_prot_pfn(pfn, kmap_prot); } EXPORT_SYMBOL_GPL(kmap_atomic_pfn); void __kunmap_atomic(void *kvaddr) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; if (vaddr >= __fix_to_virt(FIX_KMAP_END) && vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { int idx, type; type = kmap_atomic_idx(); idx = type + KM_TYPE_NR * smp_processor_id(); #ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); #endif /* * Force other mappings to Oops if they'll try to access this * pte without first remapping it. Keeping stale mappings around * is also a bad idea, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); arch_flush_lazy_mmu_mode(); } #ifdef CONFIG_DEBUG_HIGHMEM else { BUG_ON(vaddr < PAGE_OFFSET); BUG_ON(vaddr >= (unsigned long)high_memory); } #endif pagefault_enable(); } EXPORT_SYMBOL(__kunmap_atomic); struct page *kmap_atomic_to_page(void *ptr) { unsigned long idx, vaddr = (unsigned long)ptr; pte_t *pte; if (vaddr < FIXADDR_START) return virt_to_page(ptr); idx = virt_to_fix(vaddr); pte = kmap_pte - (idx - FIX_KMAP_BEGIN); return pte_page(*pte); } EXPORT_SYMBOL(kmap_atomic_to_page); void __init set_highmem_pages_init(void) { struct zone *zone; int nid; /* * Explicitly reset zone->managed_pages because set_highmem_pages_init() * is invoked before free_all_bootmem() */ reset_all_zones_managed_pages(); for_each_zone(zone) { unsigned long zone_start_pfn, zone_end_pfn; if (!is_highmem(zone)) continue; zone_start_pfn = zone->zone_start_pfn; zone_end_pfn = zone_start_pfn + zone->spanned_pages; nid = zone_to_nid(zone); printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n", zone->name, nid, zone_start_pfn, zone_end_pfn); add_highpages_with_active_regions(nid, zone_start_pfn, zone_end_pfn); } }
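/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * kmap_atomic() slots are pushed and popped per-CPU, so mappings must be
 * strictly nested and released in reverse order, e.g. when copying
 * between two highmem pages:
 */
#if 0	/* illustration only */
static void example_copy_highpage(struct page *dst, struct page *src)
{
	void *vdst = kmap_atomic(dst);
	void *vsrc = kmap_atomic(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc);	/* innermost mapping goes first */
	kunmap_atomic(vdst);
}
#endif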
gpl-2.0
timemath/hmfs
arch/m68k/platform/68360/config.c
2313
4634
/* * linux/arch/m68knommu/platform/68360/config.c * * Copyright (c) 2000 Michael Leslie <mleslie@lineo.com> * Copyright (C) 1993 Hamish Macdonald * Copyright (C) 1999 D. Jeff Dionne <jeff@uclinux.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <stdarg.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/m68360.h> #ifdef CONFIG_UCQUICC #include <asm/bootstd.h> #endif extern void m360_cpm_reset(void); // Mask to select if the PLL prescaler is enabled. #define MCU_PREEN ((unsigned short)(0x0001 << 13)) #if defined(CONFIG_UCQUICC) #define OSCILLATOR (unsigned long int)33000000 #endif static irq_handler_t timer_interrupt; unsigned long int system_clock; extern QUICC *pquicc; /* TODO DON'T Hard Code this */ /* calculate properly using the right PLL and prescaler */ // unsigned int system_clock = 33000000l; extern unsigned long int system_clock; //In kernel setup.c static irqreturn_t hw_tick(int irq, void *dummy) { /* Reset Timer1 */ /* TSTAT &= 0; */ pquicc->timer_ter1 = 0x0002; /* clear timer event */ return timer_interrupt(irq, dummy); } static struct irqaction m68360_timer_irq = { .name = "timer", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = hw_tick, }; void hw_timer_init(irq_handler_t handler) { unsigned char prescaler; unsigned short tgcr_save; #if 0 /* Restart mode, Enable int, 32KHz, Enable timer */ TCTL = TCTL_OM | TCTL_IRQEN | TCTL_CLKSOURCE_32KHZ | TCTL_TEN; /* Set prescaler (Divide 32KHz by 32)*/ TPRER = 31; /* Set compare register 32Khz / 32 / 10 = 100 */ TCMP = 10; request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL); #endif /* General purpose quicc timers: MC68360UM p7-20 */ /* Set up timer 1 (in [1..4]) to do 100Hz */ tgcr_save = pquicc->timer_tgcr & 0xfff0; pquicc->timer_tgcr = tgcr_save; /* stop and reset timer 1 */ /* pquicc->timer_tgcr |= 0x4444; */ /* halt timers when FREEZE (i.e. BDM freeze) */ prescaler = 8; pquicc->timer_tmr1 = 0x001a | /* or=1, frr=1, iclk=01b */ (unsigned short)((prescaler - 1) << 8); pquicc->timer_tcn1 = 0x0000; /* initial count */ /* calculate interval for 100Hz based on the _system_clock: */ pquicc->timer_trr1 = (system_clock/ prescaler) / HZ; /* reference count */ pquicc->timer_ter1 = 0x0003; /* clear timer events */ timer_interrupt = handler; /* enable timer 1 interrupt in CIMR */ setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); /* Start timer 1: */ tgcr_save = (pquicc->timer_tgcr & 0xfff0) | 0x0001; pquicc->timer_tgcr = tgcr_save; } int BSP_set_clock_mmss(unsigned long nowtime) { #if 0 short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60; tod->second1 = real_seconds / 10; tod->second2 = real_seconds % 10; tod->minute1 = real_minutes / 10; tod->minute2 = real_minutes % 10; #endif return 0; } void BSP_reset (void) { local_irq_disable(); asm volatile ( "moveal #_start, %a0;\n" "moveb #0, 0xFFFFF300;\n" "moveal 0(%a0), %sp;\n" "moveal 4(%a0), %a0;\n" "jmp (%a0);\n" ); } unsigned char *scc1_hwaddr; static int errno; #if defined (CONFIG_UCQUICC) _bsc0(char *, getserialnum) _bsc1(unsigned char *, gethwaddr, int, a) _bsc1(char *, getbenv, char *, a) #endif void config_BSP(char *command, int len) { unsigned char *p; m360_cpm_reset(); /* Calculate the real system clock value.
*/ { unsigned int local_pllcr = (unsigned int)(pquicc->sim_pllcr); if( local_pllcr & MCU_PREEN ) // If the prescaler is dividing by 128 { int mf = (int)(pquicc->sim_pllcr & 0x0fff); system_clock = (OSCILLATOR / 128) * (mf + 1); } else { int mf = (int)(pquicc->sim_pllcr & 0x0fff); system_clock = (OSCILLATOR) * (mf + 1); } } printk(KERN_INFO "\n68360 QUICC support (C) 2000 Lineo Inc.\n"); #if defined(CONFIG_UCQUICC) && 0 printk(KERN_INFO "uCquicc serial string [%s]\n",getserialnum()); p = scc1_hwaddr = gethwaddr(0); printk(KERN_INFO "uCquicc hwaddr %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", p[0], p[1], p[2], p[3], p[4], p[5]); p = getbenv("APPEND"); if (p) strcpy(p,command); else command[0] = 0; #else scc1_hwaddr = "\00\01\02\03\04\05"; #endif mach_reset = BSP_reset; }
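/*
 * Editor's note: a worked example of the hw_timer_init() arithmetic above,
 * with illustrative numbers. Assuming system_clock = 33000000 (OSCILLATOR
 * with mf = 0 and the /128 prescaler disabled), prescaler = 8 and HZ = 100:
 *
 *	timer_trr1 = (33000000 / 8) / 100 = 41250
 *
 * i.e. timer 1 counts at 4.125 MHz and hits the reference count every
 * 10 ms, giving the intended 100 Hz tick.
 */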
gpl-2.0
Nihhaar/android_kernel_xiaomi_mocha
net/sched/sch_gred.c
2569
14139
/* * net/sched/sch_gred.c Generic Random Early Detection queue. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: J Hadi Salim (hadi@cyberus.ca) 1998-2002 * * 991129: - Bug fix with grio mode * - a better single AvgQ mode with Grio(WRED) * - A finer grained VQ dequeue based on suggestion * from Ren Liu * - More error checks * * For all the glorious comments look at include/net/red.h */ #include <linux/slab.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> #include <net/red.h> #define GRED_DEF_PRIO (MAX_DPs / 2) #define GRED_VQ_MASK (MAX_DPs - 1) struct gred_sched_data; struct gred_sched; struct gred_sched_data { u32 limit; /* HARD maximal queue length */ u32 DP; /* the drop parameters */ u32 bytesin; /* bytes seen on virtualQ so far*/ u32 packetsin; /* packets seen on virtualQ so far*/ u32 backlog; /* bytes on the virtualQ */ u8 prio; /* the prio of this vq */ struct red_parms parms; struct red_vars vars; struct red_stats stats; }; enum { GRED_WRED_MODE = 1, GRED_RIO_MODE, }; struct gred_sched { struct gred_sched_data *tab[MAX_DPs]; unsigned long flags; u32 red_flags; u32 DPs; u32 def; struct red_vars wred_set; }; static inline int gred_wred_mode(struct gred_sched *table) { return test_bit(GRED_WRED_MODE, &table->flags); } static inline void gred_enable_wred_mode(struct gred_sched *table) { __set_bit(GRED_WRED_MODE, &table->flags); } static inline void gred_disable_wred_mode(struct gred_sched *table) { __clear_bit(GRED_WRED_MODE, &table->flags); } static inline int gred_rio_mode(struct gred_sched *table) { return test_bit(GRED_RIO_MODE, &table->flags); } static inline void gred_enable_rio_mode(struct gred_sched *table) { __set_bit(GRED_RIO_MODE, &table->flags); } static inline void gred_disable_rio_mode(struct gred_sched *table) { __clear_bit(GRED_RIO_MODE, &table->flags); } static inline int gred_wred_mode_check(struct Qdisc *sch) { struct gred_sched *table = qdisc_priv(sch); int i; /* Really ugly O(n^2) but shouldn't be needed too frequently.
*/ for (i = 0; i < table->DPs; i++) { struct gred_sched_data *q = table->tab[i]; int n; if (q == NULL) continue; for (n = i + 1; n < table->DPs; n++) if (table->tab[n] && table->tab[n]->prio == q->prio) return 1; } return 0; } static inline unsigned int gred_backlog(struct gred_sched *table, struct gred_sched_data *q, struct Qdisc *sch) { if (gred_wred_mode(table)) return sch->qstats.backlog; else return q->backlog; } static inline u16 tc_index_to_dp(struct sk_buff *skb) { return skb->tc_index & GRED_VQ_MASK; } static inline void gred_load_wred_set(const struct gred_sched *table, struct gred_sched_data *q) { q->vars.qavg = table->wred_set.qavg; q->vars.qidlestart = table->wred_set.qidlestart; } static inline void gred_store_wred_set(struct gred_sched *table, struct gred_sched_data *q) { table->wred_set.qavg = q->vars.qavg; table->wred_set.qidlestart = q->vars.qidlestart; } static inline int gred_use_ecn(struct gred_sched *t) { return t->red_flags & TC_RED_ECN; } static inline int gred_use_harddrop(struct gred_sched *t) { return t->red_flags & TC_RED_HARDDROP; } static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct gred_sched_data *q = NULL; struct gred_sched *t = qdisc_priv(sch); unsigned long qavg = 0; u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { dp = t->def; q = t->tab[dp]; if (!q) { /* Pass through packets not assigned to a DP * if no default DP has been configured. This * allows for DP flows to be left untouched. */ if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len) return qdisc_enqueue_tail(skb, sch); else goto drop; } /* fix tc_index? --could be controversial but needed for requeueing */ skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; } /* sum up all the qaves of prios < ours to get the new qave */ if (!gred_wred_mode(t) && gred_rio_mode(t)) { int i; for (i = 0; i < t->DPs; i++) { if (t->tab[i] && t->tab[i]->prio < q->prio && !red_is_idling(&t->tab[i]->vars)) qavg += t->tab[i]->vars.qavg; } } q->packetsin++; q->bytesin += qdisc_pkt_len(skb); if (gred_wred_mode(t)) gred_load_wred_set(t, q); q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, gred_backlog(t, q, sch)); if (red_is_idling(&q->vars)) red_end_of_idle_period(&q->vars); if (gred_wred_mode(t)) gred_store_wred_set(t, q); switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) { case RED_DONT_MARK: break; case RED_PROB_MARK: sch->qstats.overlimits++; if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; } q->stats.prob_mark++; break; case RED_HARD_MARK: sch->qstats.overlimits++; if (gred_use_harddrop(t) || !gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; goto congestion_drop; } q->stats.forced_mark++; break; } if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { q->backlog += qdisc_pkt_len(skb); return qdisc_enqueue_tail(skb, sch); } q->stats.pdrop++; drop: return qdisc_drop(skb, sch); congestion_drop: qdisc_drop(skb, sch); return NET_XMIT_CN; } static struct sk_buff *gred_dequeue(struct Qdisc *sch) { struct sk_buff *skb; struct gred_sched *t = qdisc_priv(sch); skb = qdisc_dequeue_head(sch); if (skb) { struct gred_sched_data *q; u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n", tc_index_to_dp(skb)); } else { q->backlog -= qdisc_pkt_len(skb); if (gred_wred_mode(t)) { if (!sch->qstats.backlog) red_start_of_idle_period(&t->wred_set); } else { if (!q->backlog) 
red_start_of_idle_period(&q->vars); } } return skb; } return NULL; } static unsigned int gred_drop(struct Qdisc *sch) { struct sk_buff *skb; struct gred_sched *t = qdisc_priv(sch); skb = qdisc_dequeue_tail(sch); if (skb) { unsigned int len = qdisc_pkt_len(skb); struct gred_sched_data *q; u16 dp = tc_index_to_dp(skb); if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n", tc_index_to_dp(skb)); } else { q->backlog -= len; q->stats.other++; if (gred_wred_mode(t)) { if (!sch->qstats.backlog) red_start_of_idle_period(&t->wred_set); } else { if (!q->backlog) red_start_of_idle_period(&q->vars); } } qdisc_drop(skb, sch); return len; } return 0; } static void gred_reset(struct Qdisc *sch) { int i; struct gred_sched *t = qdisc_priv(sch); qdisc_reset_queue(sch); for (i = 0; i < t->DPs; i++) { struct gred_sched_data *q = t->tab[i]; if (!q) continue; red_restart(&q->vars); q->backlog = 0; } } static inline void gred_destroy_vq(struct gred_sched_data *q) { kfree(q); } static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) { struct gred_sched *table = qdisc_priv(sch); struct tc_gred_sopt *sopt; int i; if (dps == NULL) return -EINVAL; sopt = nla_data(dps); if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs) return -EINVAL; sch_tree_lock(sch); table->DPs = sopt->DPs; table->def = sopt->def_DP; table->red_flags = sopt->flags; /* * Every entry point to GRED is synchronized with the above code * and the DP is checked against DPs, i.e. shadowed VQs can no * longer be found so we can unlock right here. */ sch_tree_unlock(sch); if (sopt->grio) { gred_enable_rio_mode(table); gred_disable_wred_mode(table); if (gred_wred_mode_check(sch)) gred_enable_wred_mode(table); } else { gred_disable_rio_mode(table); gred_disable_wred_mode(table); } for (i = table->DPs; i < MAX_DPs; i++) { if (table->tab[i]) { pr_warning("GRED: Warning: Destroying " "shadowed VQ 0x%x\n", i); gred_destroy_vq(table->tab[i]); table->tab[i] = NULL; } } return 0; } static inline int gred_change_vq(struct Qdisc *sch, int dp, struct tc_gred_qopt *ctl, int prio, u8 *stab, u32 max_P, struct gred_sched_data **prealloc) { struct gred_sched *table = qdisc_priv(sch); struct gred_sched_data *q = table->tab[dp]; if (!q) { table->tab[dp] = q = *prealloc; *prealloc = NULL; if (!q) return -ENOMEM; } q->DP = dp; q->prio = prio; q->limit = ctl->limit; if (q->backlog == 0) red_end_of_idle_period(&q->vars); red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, stab, max_P); red_set_vars(&q->vars); return 0; } static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = { [TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) }, [TCA_GRED_STAB] = { .len = 256 }, [TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) }, [TCA_GRED_MAX_P] = { .type = NLA_U32 }, }; static int gred_change(struct Qdisc *sch, struct nlattr *opt) { struct gred_sched *table = qdisc_priv(sch); struct tc_gred_qopt *ctl; struct nlattr *tb[TCA_GRED_MAX + 1]; int err, prio = GRED_DEF_PRIO; u8 *stab; u32 max_P; struct gred_sched_data *prealloc; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); if (err < 0) return err; if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) return gred_change_table_def(sch, opt); if (tb[TCA_GRED_PARMS] == NULL || tb[TCA_GRED_STAB] == NULL) return -EINVAL; max_P = tb[TCA_GRED_MAX_P] ? 
nla_get_u32(tb[TCA_GRED_MAX_P]) : 0; err = -EINVAL; ctl = nla_data(tb[TCA_GRED_PARMS]); stab = nla_data(tb[TCA_GRED_STAB]); if (ctl->DP >= table->DPs) goto errout; if (gred_rio_mode(table)) { if (ctl->prio == 0) { int def_prio = GRED_DEF_PRIO; if (table->tab[table->def]) def_prio = table->tab[table->def]->prio; printk(KERN_DEBUG "GRED: DP %u does not have a prio " "setting default to %d\n", ctl->DP, def_prio); prio = def_prio; } else prio = ctl->prio; } prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); sch_tree_lock(sch); err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc); if (err < 0) goto errout_locked; if (gred_rio_mode(table)) { gred_disable_wred_mode(table); if (gred_wred_mode_check(sch)) gred_enable_wred_mode(table); } err = 0; errout_locked: sch_tree_unlock(sch); kfree(prealloc); errout: return err; } static int gred_init(struct Qdisc *sch, struct nlattr *opt) { struct nlattr *tb[TCA_GRED_MAX + 1]; int err; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); if (err < 0) return err; if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) return -EINVAL; return gred_change_table_def(sch, tb[TCA_GRED_DPS]); } static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) { struct gred_sched *table = qdisc_priv(sch); struct nlattr *parms, *opts = NULL; int i; u32 max_p[MAX_DPs]; struct tc_gred_sopt sopt = { .DPs = table->DPs, .def_DP = table->def, .grio = gred_rio_mode(table), .flags = table->red_flags, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt)) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; max_p[i] = q ? q->parms.max_P : 0; } if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p)) goto nla_put_failure; parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; struct tc_gred_qopt opt; unsigned long qavg; memset(&opt, 0, sizeof(opt)); if (!q) { /* hack -- fix at some point with proper message This is how we indicate to tc that there is no VQ at this DP */ opt.DP = MAX_DPs + i; goto append_opt; } opt.limit = q->limit; opt.DP = q->DP; opt.backlog = q->backlog; opt.prio = q->prio; opt.qth_min = q->parms.qth_min >> q->parms.Wlog; opt.qth_max = q->parms.qth_max >> q->parms.Wlog; opt.Wlog = q->parms.Wlog; opt.Plog = q->parms.Plog; opt.Scell_log = q->parms.Scell_log; opt.other = q->stats.other; opt.early = q->stats.prob_drop; opt.forced = q->stats.forced_drop; opt.pdrop = q->stats.pdrop; opt.packets = q->packetsin; opt.bytesin = q->bytesin; if (gred_wred_mode(table)) gred_load_wred_set(table, q); qavg = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg >> q->parms.Wlog); opt.qave = qavg >> q->parms.Wlog; append_opt: if (nla_append(skb, sizeof(opt), &opt) < 0) goto nla_put_failure; } nla_nest_end(skb, parms); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static void gred_destroy(struct Qdisc *sch) { struct gred_sched *table = qdisc_priv(sch); int i; for (i = 0; i < table->DPs; i++) { if (table->tab[i]) gred_destroy_vq(table->tab[i]); } } static struct Qdisc_ops gred_qdisc_ops __read_mostly = { .id = "gred", .priv_size = sizeof(struct gred_sched), .enqueue = gred_enqueue, .dequeue = gred_dequeue, .peek = qdisc_peek_head, .drop = gred_drop, .init = gred_init, .reset = gred_reset, .destroy = gred_destroy, .change = gred_change, .dump = gred_dump, .owner = 
THIS_MODULE, }; static int __init gred_module_init(void) { return register_qdisc(&gred_qdisc_ops); } static void __exit gred_module_exit(void) { unregister_qdisc(&gred_qdisc_ops); } module_init(gred_module_init) module_exit(gred_module_exit) MODULE_LICENSE("GPL");
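/*
 * Editor's note: a hedged userspace sketch, not part of this file. GRED is
 * normally configured from iproute2's tc(8): first the virtual-queue table
 * (TCA_GRED_DPS, handled by gred_change_table_def() above), then per-DP RED
 * parameters (handled by gred_change()). Roughly:
 *
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 3 grio
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 1 probability 0.02 prio 2
 *
 * The device name and numbers are illustrative; the exact option spelling
 * follows the iproute2 gred support.
 */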
gpl-2.0
syhost/android_kernel_pantech_ef50l
fs/ext4/move_extent.c
3081
41825
/* * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd. * Written by Takashi Sato <t-sato@yk.jp.nec.com> * Akira Fujita <a-fujita@rs.jp.nec.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/fs.h> #include <linux/quotaops.h> #include <linux/slab.h> #include "ext4_jbd2.h" #include "ext4.h" /** * get_ext_path - Find an extent path for designated logical block number. * * @inode: an inode which is searched * @lblock: logical block number to find an extent path * @path: pointer to an extent path pointer (for output) * * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value * on failure. */ static inline int get_ext_path(struct inode *inode, ext4_lblk_t lblock, struct ext4_ext_path **path) { int ret = 0; *path = ext4_ext_find_extent(inode, lblock, *path); if (IS_ERR(*path)) { ret = PTR_ERR(*path); *path = NULL; } else if ((*path)[ext_depth(inode)].p_ext == NULL) ret = -ENODATA; return ret; } /** * copy_extent_status - Copy the extent's initialization status * * @src: an extent for getting initialize status * @dest: an extent to be set the status */ static void copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest) { if (ext4_ext_is_uninitialized(src)) ext4_ext_mark_uninitialized(dest); else dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest)); } /** * mext_next_extent - Search for the next extent and set it to "extent" * * @inode: inode which is searched * @path: this will obtain data for the next extent * @extent: pointer to the next extent we have just gotten * * Search the next extent in the array of ext4_ext_path structure (@path) * and set it to ext4_extent structure (@extent). In addition, the member of * @path (->p_ext) also points the next extent. Return 0 on success, 1 if * ext4_ext_path structure refers to the last extent, or a negative error * value on failure. 
*/ static int mext_next_extent(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent **extent) { struct ext4_extent_header *eh; int ppos, leaf_ppos = path->p_depth; ppos = leaf_ppos; if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) { /* leaf block */ *extent = ++path[ppos].p_ext; path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); return 0; } while (--ppos >= 0) { if (EXT_LAST_INDEX(path[ppos].p_hdr) > path[ppos].p_idx) { int cur_ppos = ppos; /* index block */ path[ppos].p_idx++; path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); if (path[ppos+1].p_bh) brelse(path[ppos+1].p_bh); path[ppos+1].p_bh = sb_bread(inode->i_sb, path[ppos].p_block); if (!path[ppos+1].p_bh) return -EIO; path[ppos+1].p_hdr = ext_block_hdr(path[ppos+1].p_bh); /* Halfway index block */ while (++cur_ppos < leaf_ppos) { path[cur_ppos].p_idx = EXT_FIRST_INDEX(path[cur_ppos].p_hdr); path[cur_ppos].p_block = ext4_idx_pblock(path[cur_ppos].p_idx); if (path[cur_ppos+1].p_bh) brelse(path[cur_ppos+1].p_bh); path[cur_ppos+1].p_bh = sb_bread(inode->i_sb, path[cur_ppos].p_block); if (!path[cur_ppos+1].p_bh) return -EIO; path[cur_ppos+1].p_hdr = ext_block_hdr(path[cur_ppos+1].p_bh); } path[leaf_ppos].p_ext = *extent = NULL; eh = path[leaf_ppos].p_hdr; if (le16_to_cpu(eh->eh_entries) == 0) /* empty leaf is found */ return -ENODATA; /* leaf block */ path[leaf_ppos].p_ext = *extent = EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr); path[leaf_ppos].p_block = ext4_ext_pblock(path[leaf_ppos].p_ext); return 0; } } /* We found the last extent */ return 1; } /** * mext_check_null_inode - NULL check for two inodes * * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. */ static int mext_check_null_inode(struct inode *inode1, struct inode *inode2, const char *function, unsigned int line) { int ret = 0; if (inode1 == NULL) { __ext4_error(inode2->i_sb, function, line, "Both inodes should not be NULL: " "inode1 NULL inode2 %lu", inode2->i_ino); ret = -EIO; } else if (inode2 == NULL) { __ext4_error(inode1->i_sb, function, line, "Both inodes should not be NULL: " "inode1 %lu inode2 NULL", inode1->i_ino); ret = -EIO; } return ret; } /** * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem * * @orig_inode: original inode structure * @donor_inode: donor inode structure * Acquire write lock of i_data_sem of the two inodes (orig and donor) by * i_ino order. */ static void double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) { struct inode *first = orig_inode, *second = donor_inode; /* * Use the inode number to provide the stable locking order instead * of its address, because the C language doesn't guarantee you can * compare pointers that don't come from the same array. */ if (donor_inode->i_ino < orig_inode->i_ino) { first = donor_inode; second = orig_inode; } down_write(&EXT4_I(first)->i_data_sem); down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); } /** * double_up_write_data_sem - Release two inodes' write lock of i_data_sem * * @orig_inode: original inode structure to be released its lock first * @donor_inode: donor inode structure to be released its lock second * Release write lock of i_data_sem of two inodes (orig and donor). 
*/ static void double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) { up_write(&EXT4_I(orig_inode)->i_data_sem); up_write(&EXT4_I(donor_inode)->i_data_sem); } /** * mext_insert_across_blocks - Insert extents across leaf block * * @handle: journal handle * @orig_inode: original inode * @o_start: first original extent to be changed * @o_end: last original extent to be changed * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * * Allocate a new leaf block and insert extents into it. Return 0 on success, * or a negative error value on failure. */ static int mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext) { struct ext4_ext_path *orig_path = NULL; ext4_lblk_t eblock = 0; int new_flag = 0; int end_flag = 0; int err = 0; if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) { if (o_start == o_end) { /* start_ext new_ext end_ext * donor |---------|-----------|--------| * orig |------------------------------| */ end_flag = 1; } else { /* start_ext new_ext end_ext * donor |---------|----------|---------| * orig |---------------|--------------| */ o_end->ee_block = end_ext->ee_block; o_end->ee_len = end_ext->ee_len; ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); } o_start->ee_len = start_ext->ee_len; eblock = le32_to_cpu(start_ext->ee_block); new_flag = 1; } else if (start_ext->ee_len && new_ext->ee_len && !end_ext->ee_len && o_start == o_end) { /* start_ext new_ext * donor |--------------|---------------| * orig |------------------------------| */ o_start->ee_len = start_ext->ee_len; eblock = le32_to_cpu(start_ext->ee_block); new_flag = 1; } else if (!start_ext->ee_len && new_ext->ee_len && end_ext->ee_len && o_start == o_end) { /* new_ext end_ext * donor |--------------|---------------| * orig |------------------------------| */ o_end->ee_block = end_ext->ee_block; o_end->ee_len = end_ext->ee_len; ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); /* * Set 0 to the extent block if new_ext was * the first block. */ if (new_ext->ee_block) eblock = le32_to_cpu(new_ext->ee_block); new_flag = 1; } else { ext4_debug("ext4 move extent: Unexpected insert case\n"); return -EIO; } if (new_flag) { err = get_ext_path(orig_inode, eblock, &orig_path); if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, orig_path, new_ext, 0)) goto out; } if (end_flag) { err = get_ext_path(orig_inode, le32_to_cpu(end_ext->ee_block) - 1, &orig_path); if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, orig_path, end_ext, 0)) goto out; } out: if (orig_path) { ext4_ext_drop_refs(orig_path); kfree(orig_path); } return err; } /** * mext_insert_inside_block - Insert new extent to the extent block * * @o_start: first original extent to be moved * @o_end: last original extent to be moved * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * @eh: extent header of target leaf block * @range_to_move: used to decide how to insert extent * * Insert extents into the leaf block. The extent (@o_start) is overwritten * by inserted extents. 
*/ static void mext_insert_inside_block(struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext, struct ext4_extent_header *eh, int range_to_move) { int i = 0; unsigned long len; /* Move the existing extents */ if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) { len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) - (unsigned long)(o_end + 1); memmove(o_end + 1 + range_to_move, o_end + 1, len); } /* Insert start entry */ if (start_ext->ee_len) o_start[i++].ee_len = start_ext->ee_len; /* Insert new entry */ if (new_ext->ee_len) { o_start[i] = *new_ext; ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext)); } /* Insert end entry */ if (end_ext->ee_len) o_start[i] = *end_ext; /* Increment the total entries counter on the extent block */ le16_add_cpu(&eh->eh_entries, range_to_move); } /** * mext_insert_extents - Insert new extent * * @handle: journal handle * @orig_inode: original inode * @orig_path: path indicates first extent to be changed * @o_start: first original extent to be changed * @o_end: last original extent to be changed * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * * Call the function to insert extents. If we cannot add more extents into * the leaf block, we call mext_insert_across_blocks() to create a * new leaf block. Otherwise call mext_insert_inside_block(). Return 0 * on success, or a negative error value on failure. */ static int mext_insert_extents(handle_t *handle, struct inode *orig_inode, struct ext4_ext_path *orig_path, struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext) { struct ext4_extent_header *eh; unsigned long need_slots, slots_range; int range_to_move, depth, ret; /* * The extents need to be inserted * start_extent + new_extent + end_extent. */ need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) + (new_ext->ee_len ? 1 : 0); /* The number of slots between start and end */ slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1) / sizeof(struct ext4_extent); /* Range to move the end of extent */ range_to_move = need_slots - slots_range; depth = orig_path->p_depth; orig_path += depth; eh = orig_path->p_hdr; if (depth) { /* Register to journal */ ret = ext4_journal_get_write_access(handle, orig_path->p_bh); if (ret) return ret; } /* Expansion */ if (range_to_move > 0 && (range_to_move > le16_to_cpu(eh->eh_max) - le16_to_cpu(eh->eh_entries))) { ret = mext_insert_across_blocks(handle, orig_inode, o_start, o_end, start_ext, new_ext, end_ext); if (ret < 0) return ret; } else mext_insert_inside_block(o_start, o_end, start_ext, new_ext, end_ext, eh, range_to_move); if (depth) { ret = ext4_handle_dirty_metadata(handle, orig_inode, orig_path->p_bh); if (ret) return ret; } else { ret = ext4_mark_inode_dirty(handle, orig_inode); if (ret < 0) return ret; } return 0; } /** * mext_leaf_block - Move one leaf extent block into the inode. * * @handle: journal handle * @orig_inode: original inode * @orig_path: path indicates first extent to be changed * @dext: donor extent * @from: start offset on the target file * * In order to insert extents into the leaf block, we must divide the extent * in the leaf block into three extents. One of them covers the range where * the new extents are inserted, and the others are located around it.
* * Therefore, this function creates structures to save extents of the leaf * block, and inserts extents by calling mext_insert_extents() with * created extents. Return 0 on success, or a negative error value on failure. */ static int mext_leaf_block(handle_t *handle, struct inode *orig_inode, struct ext4_ext_path *orig_path, struct ext4_extent *dext, ext4_lblk_t *from) { struct ext4_extent *oext, *o_start, *o_end, *prev_ext; struct ext4_extent new_ext, start_ext, end_ext; ext4_lblk_t new_ext_end; int oext_alen, new_ext_alen, end_ext_alen; int depth = ext_depth(orig_inode); int ret; start_ext.ee_block = end_ext.ee_block = 0; o_start = o_end = oext = orig_path[depth].p_ext; oext_alen = ext4_ext_get_actual_len(oext); start_ext.ee_len = end_ext.ee_len = 0; new_ext.ee_block = cpu_to_le32(*from); ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext)); new_ext.ee_len = dext->ee_len; new_ext_alen = ext4_ext_get_actual_len(&new_ext); new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; /* * Case: original extent is first * oext |--------| * new_ext |--| * start_ext |--| */ if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) && le32_to_cpu(new_ext.ee_block) < le32_to_cpu(oext->ee_block) + oext_alen) { start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - le32_to_cpu(oext->ee_block)); start_ext.ee_block = oext->ee_block; copy_extent_status(oext, &start_ext); } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { prev_ext = oext - 1; /* * We can merge new_ext into previous extent, * if these are contiguous and same extent type. */ if (ext4_can_extents_be_merged(orig_inode, prev_ext, &new_ext)) { o_start = prev_ext; start_ext.ee_len = cpu_to_le16( ext4_ext_get_actual_len(prev_ext) + new_ext_alen); start_ext.ee_block = oext->ee_block; copy_extent_status(prev_ext, &start_ext); new_ext.ee_len = 0; } } /* * Case: new_ext_end must be less than oext * oext |-----------| * new_ext |-------| */ if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { EXT4_ERROR_INODE(orig_inode, "new_ext_end(%u) should be less than or equal to " "oext->ee_block(%u) + oext_alen(%d) - 1", new_ext_end, le32_to_cpu(oext->ee_block), oext_alen); ret = -EIO; goto out; } /* * Case: new_ext is smaller than original extent * oext |---------------| * new_ext |-----------| * end_ext |---| */ if (le32_to_cpu(oext->ee_block) <= new_ext_end && new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) { end_ext.ee_len = cpu_to_le16(le32_to_cpu(oext->ee_block) + oext_alen - 1 - new_ext_end); copy_extent_status(oext, &end_ext); end_ext_alen = ext4_ext_get_actual_len(&end_ext); ext4_ext_store_pblock(&end_ext, (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen)); end_ext.ee_block = cpu_to_le32(le32_to_cpu(o_end->ee_block) + oext_alen - end_ext_alen); } ret = mext_insert_extents(handle, orig_inode, orig_path, o_start, o_end, &start_ext, &new_ext, &end_ext); out: return ret; } /** * mext_calc_swap_extents - Calculate extents for extent swapping. * * @tmp_dext: the extent that will belong to the original inode * @tmp_oext: the extent that will belong to the donor inode * @orig_off: block offset of original inode * @donor_off: block offset of donor inode * @max_count: the maximum length of extents * * Return 0 on success, or a negative error value on failure. 
*/ static int mext_calc_swap_extents(struct ext4_extent *tmp_dext, struct ext4_extent *tmp_oext, ext4_lblk_t orig_off, ext4_lblk_t donor_off, ext4_lblk_t max_count) { ext4_lblk_t diff, orig_diff; struct ext4_extent dext_old, oext_old; BUG_ON(orig_off != donor_off); /* original and donor extents have to cover the same block offset */ if (orig_off < le32_to_cpu(tmp_oext->ee_block) || le32_to_cpu(tmp_oext->ee_block) + ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off) return -ENODATA; if (orig_off < le32_to_cpu(tmp_dext->ee_block) || le32_to_cpu(tmp_dext->ee_block) + ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off) return -ENODATA; dext_old = *tmp_dext; oext_old = *tmp_oext; /* When tmp_dext is too large, pick up the target range. */ diff = donor_off - le32_to_cpu(tmp_dext->ee_block); ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff); tmp_dext->ee_block = cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff); tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff); if (max_count < ext4_ext_get_actual_len(tmp_dext)) tmp_dext->ee_len = cpu_to_le16(max_count); orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block); ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff); /* Adjust extent length if donor extent is larger than orig */ if (ext4_ext_get_actual_len(tmp_dext) > ext4_ext_get_actual_len(tmp_oext) - orig_diff) tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) - orig_diff); tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext)); copy_extent_status(&oext_old, tmp_dext); copy_extent_status(&dext_old, tmp_oext); return 0; } /** * mext_replace_branches - Replace original extents with new extents * * @handle: journal handle * @orig_inode: original inode * @donor_inode: donor inode * @from: block offset of orig_inode * @count: block count to be replaced * @err: pointer to save return value * * Replace original inode extents and donor inode extents page by page. * We implement this replacement in the following three steps: * 1. Save the block information of original and donor inodes into * dummy extents. * 2. Change the block information of original inode to point at the * donor inode blocks. * 3. Change the block information of donor inode to point at the saved * original inode blocks in the dummy extents. * * Return replaced block count. */ static int mext_replace_branches(handle_t *handle, struct inode *orig_inode, struct inode *donor_inode, ext4_lblk_t from, ext4_lblk_t count, int *err) { struct ext4_ext_path *orig_path = NULL; struct ext4_ext_path *donor_path = NULL; struct ext4_extent *oext, *dext; struct ext4_extent tmp_dext, tmp_oext; ext4_lblk_t orig_off = from, donor_off = from; int depth; int replaced_count = 0; int dext_alen; /* Protect extent trees against block allocations via delalloc */ double_down_write_data_sem(orig_inode, donor_inode); /* Get the original extent for the block "orig_off" */ *err = get_ext_path(orig_inode, orig_off, &orig_path); if (*err) goto out; /* Get the donor extent for the head */ *err = get_ext_path(donor_inode, donor_off, &donor_path); if (*err) goto out; depth = ext_depth(orig_inode); oext = orig_path[depth].p_ext; tmp_oext = *oext; depth = ext_depth(donor_inode); dext = donor_path[depth].p_ext; tmp_dext = *dext; *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, donor_off, count); if (*err) goto out; /* Loop for the donor extents */ while (1) { /* The extent for donor must be found. 
*/ if (!dext) { EXT4_ERROR_INODE(donor_inode, "The extent for donor must be found"); *err = -EIO; goto out; } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { EXT4_ERROR_INODE(donor_inode, "Donor offset(%u) and the first block of donor " "extent(%u) should be equal", donor_off, le32_to_cpu(tmp_dext.ee_block)); *err = -EIO; goto out; } /* Set donor extent to orig extent */ *err = mext_leaf_block(handle, orig_inode, orig_path, &tmp_dext, &orig_off); if (*err) goto out; /* Set orig extent to donor extent */ *err = mext_leaf_block(handle, donor_inode, donor_path, &tmp_oext, &donor_off); if (*err) goto out; dext_alen = ext4_ext_get_actual_len(&tmp_dext); replaced_count += dext_alen; donor_off += dext_alen; orig_off += dext_alen; /* Already moved the expected blocks */ if (replaced_count >= count) break; if (orig_path) ext4_ext_drop_refs(orig_path); *err = get_ext_path(orig_inode, orig_off, &orig_path); if (*err) goto out; depth = ext_depth(orig_inode); oext = orig_path[depth].p_ext; tmp_oext = *oext; if (donor_path) ext4_ext_drop_refs(donor_path); *err = get_ext_path(donor_inode, donor_off, &donor_path); if (*err) goto out; depth = ext_depth(donor_inode); dext = donor_path[depth].p_ext; tmp_dext = *dext; *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, donor_off, count - replaced_count); if (*err) goto out; } out: if (orig_path) { ext4_ext_drop_refs(orig_path); kfree(orig_path); } if (donor_path) { ext4_ext_drop_refs(donor_path); kfree(donor_path); } ext4_ext_invalidate_cache(orig_inode); ext4_ext_invalidate_cache(donor_inode); double_up_write_data_sem(orig_inode, donor_inode); return replaced_count; } /** * move_extent_per_page - Move extent data per page * * @o_filp: file structure of original file * @donor_inode: donor inode * @orig_page_offset: page index on original file * @data_offset_in_page: block index where data swapping starts * @block_len_in_page: the number of blocks to be swapped * @uninit: orig extent is uninitialized or not * @err: pointer to save return value * * Save the data in original inode blocks and replace original inode extents * with donor inode extents by calling mext_replace_branches(). * Finally, write out the saved data in new original inode blocks. Return * replaced block count. */ static int move_extent_per_page(struct file *o_filp, struct inode *donor_inode, pgoff_t orig_page_offset, int data_offset_in_page, int block_len_in_page, int uninit, int *err) { struct inode *orig_inode = o_filp->f_dentry->d_inode; struct address_space *mapping = orig_inode->i_mapping; struct buffer_head *bh; struct page *page = NULL; const struct address_space_operations *a_ops = mapping->a_ops; handle_t *handle; ext4_lblk_t orig_blk_offset; long long offs = orig_page_offset << PAGE_CACHE_SHIFT; unsigned long blocksize = orig_inode->i_sb->s_blocksize; unsigned int w_flags = 0; unsigned int tmp_data_size, data_size, replaced_size; void *fsdata; int i, jblocks; int err2 = 0; int replaced_count = 0; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; /* * It needs twice the amount of ordinary journal buffers because * inode and donor_inode may each change different metadata blocks.
*/ jblocks = ext4_writepage_trans_blocks(orig_inode) * 2; handle = ext4_journal_start(orig_inode, jblocks); if (IS_ERR(handle)) { *err = PTR_ERR(handle); return 0; } if (segment_eq(get_fs(), KERNEL_DS)) w_flags |= AOP_FLAG_UNINTERRUPTIBLE; orig_blk_offset = orig_page_offset * blocks_per_page + data_offset_in_page; /* * If the orig extent is an uninitialized one, * there is no need to force the page into memory * and then force it to be written out again. * Just swap data blocks between orig and donor. */ if (uninit) { replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, orig_blk_offset, block_len_in_page, err); goto out2; } offs = (long long)orig_blk_offset << orig_inode->i_blkbits; /* Calculate data_size */ if ((orig_blk_offset + block_len_in_page - 1) == ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { /* Replace the last block */ tmp_data_size = orig_inode->i_size & (blocksize - 1); /* * If data_size equals zero, it means data_size is a multiple of * blocksize. So we set an appropriate value. */ if (tmp_data_size == 0) tmp_data_size = blocksize; data_size = tmp_data_size + ((block_len_in_page - 1) << orig_inode->i_blkbits); } else data_size = block_len_in_page << orig_inode->i_blkbits; replaced_size = data_size; *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags, &page, &fsdata); if (unlikely(*err < 0)) goto out; if (!PageUptodate(page)) { mapping->a_ops->readpage(o_filp, page); lock_page(page); } /* * try_to_release_page() doesn't call releasepage in writeback mode. * We should care about the order of writing to the same file * by multiple move extent processes. * It needs to call wait_on_page_writeback() to wait for the * writeback of the page. */ wait_on_page_writeback(page); /* Release old bh and drop refs */ try_to_release_page(page, 0); replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, orig_blk_offset, block_len_in_page, &err2); if (err2) { if (replaced_count) { block_len_in_page = replaced_count; replaced_size = block_len_in_page << orig_inode->i_blkbits; } else goto out; } if (!page_has_buffers(page)) create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0); bh = page_buffers(page); for (i = 0; i < data_offset_in_page; i++) bh = bh->b_this_page; for (i = 0; i < block_len_in_page; i++) { *err = ext4_get_block(orig_inode, (sector_t)(orig_blk_offset + i), bh, 0); if (*err < 0) goto out; if (bh->b_this_page != NULL) bh = bh->b_this_page; } *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size, page, fsdata); page = NULL; out: if (unlikely(page)) { if (PageLocked(page)) unlock_page(page); page_cache_release(page); ext4_journal_stop(handle); } out2: ext4_journal_stop(handle); if (err2) *err = err2; return replaced_count; } /** * mext_check_arguments - Check whether move extent can be done * * @orig_inode: original inode * @donor_inode: donor inode * @orig_start: logical start offset in block for orig * @donor_start: logical start offset in block for donor * @len: the number of blocks to be moved * * Check the arguments of ext4_move_extents() whether the files can be * exchanged with each other. * Return 0 on success, or a negative error value on failure.
*/ static int mext_check_arguments(struct inode *orig_inode, struct inode *donor_inode, __u64 orig_start, __u64 donor_start, __u64 *len) { ext4_lblk_t orig_blocks, donor_blocks; unsigned int blkbits = orig_inode->i_blkbits; unsigned int blocksize = 1 << blkbits; if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { ext4_debug("ext4 move extent: suid or sgid is set" " to donor file [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode)) return -EPERM; /* Ext4 move extent does not support swapfile */ if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { ext4_debug("ext4 move extent: The argument files should " "not be swapfile [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } /* Files should be in the same ext4 FS */ if (orig_inode->i_sb != donor_inode->i_sb) { ext4_debug("ext4 move extent: The argument files " "should be in same FS [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } /* Ext4 move extent supports only extent based file */ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) { ext4_debug("ext4 move extent: orig file is not extents " "based file [ino:orig %lu]\n", orig_inode->i_ino); return -EOPNOTSUPP; } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) { ext4_debug("ext4 move extent: donor file is not extents " "based file [ino:donor %lu]\n", donor_inode->i_ino); return -EOPNOTSUPP; } if ((!orig_inode->i_size) || (!donor_inode->i_size)) { ext4_debug("ext4 move extent: File size is 0 byte\n"); return -EINVAL; } /* Start offset should be same */ if (orig_start != donor_start) { ext4_debug("ext4 move extent: orig and donor's start " "offset are not same [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } if ((orig_start >= EXT_MAX_BLOCKS) || (donor_start >= EXT_MAX_BLOCKS) || (*len > EXT_MAX_BLOCKS) || (orig_start + *len >= EXT_MAX_BLOCKS)) { ext4_debug("ext4 move extent: Can't handle over [%u] blocks " "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } if (orig_inode->i_size > donor_inode->i_size) { donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits; /* TODO: eliminate this artificial restriction */ if (orig_start >= donor_blocks) { ext4_debug("ext4 move extent: orig start offset " "[%llu] should be less than donor file blocks " "[%u] [ino:orig %lu, donor %lu]\n", orig_start, donor_blocks, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } /* TODO: eliminate this artificial restriction */ if (orig_start + *len > donor_blocks) { ext4_debug("ext4 move extent: End offset [%llu] should " "be less than donor file blocks [%u]." "So adjust length from %llu to %llu " "[ino:orig %lu, donor %lu]\n", orig_start + *len, donor_blocks, *len, donor_blocks - orig_start, orig_inode->i_ino, donor_inode->i_ino); *len = donor_blocks - orig_start; } } else { orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits; if (orig_start >= orig_blocks) { ext4_debug("ext4 move extent: start offset [%llu] " "should be less than original file blocks " "[%u] [ino:orig %lu, donor %lu]\n", orig_start, orig_blocks, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } if (orig_start + *len > orig_blocks) { ext4_debug("ext4 move extent: Adjust length " "from %llu to %llu. 
Because it should be " "less than original file blocks " "[ino:orig %lu, donor %lu]\n", *len, orig_blocks - orig_start, orig_inode->i_ino, donor_inode->i_ino); *len = orig_blocks - orig_start; } } if (!*len) { ext4_debug("ext4 move extent: len should not be 0 " "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } return 0; } /** * mext_inode_double_lock - Lock i_mutex on both @inode1 and @inode2 * * @inode1: the inode structure * @inode2: the inode structure * * Lock two inodes' i_mutex by i_ino order. * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. */ static int mext_inode_double_lock(struct inode *inode1, struct inode *inode2) { int ret = 0; BUG_ON(inode1 == NULL && inode2 == NULL); ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__); if (ret < 0) goto out; if (inode1 == inode2) { mutex_lock(&inode1->i_mutex); goto out; } if (inode1->i_ino < inode2->i_ino) { mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD); } else { mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD); } out: return ret; } /** * mext_inode_double_unlock - Release i_mutex on both @inode1 and @inode2 * * @inode1: the inode that is released first * @inode2: the inode that is released second * * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. */ static int mext_inode_double_unlock(struct inode *inode1, struct inode *inode2) { int ret = 0; BUG_ON(inode1 == NULL && inode2 == NULL); ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__); if (ret < 0) goto out; if (inode1) mutex_unlock(&inode1->i_mutex); if (inode2 && inode2 != inode1) mutex_unlock(&inode2->i_mutex); out: return ret; } /** * ext4_move_extents - Exchange the specified range of a file * * @o_filp: file structure of the original file * @d_filp: file structure of the donor file * @orig_start: start offset in block for orig * @donor_start: start offset in block for donor * @len: the number of blocks to be moved * @moved_len: moved block length * * This function returns 0 and moved block length is set in moved_len * if succeed, otherwise returns error value. * * Note: ext4_move_extents() proceeds the following order. * 1:ext4_move_extents() calculates the last block number of moving extent * function by the start block number (orig_start) and the number of blocks * to be moved (len) specified as arguments. * If the {orig, donor}_start points a hole, the extent's start offset * pointed by ext_cur (current extent), holecheck_path, orig_path are set * after hole behind. * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent * or the ext_cur exceeds the block_end which is last logical block number. * 3:To get the length of continues area, call mext_next_extent() * specified with the ext_cur (initial value is holecheck_path) re-cursive, * until find un-continuous extent, the start logical block number exceeds * the block_end or the extent points to the last extent. * 4:Exchange the original inode data with donor inode data * from orig_page_offset to seq_end_page. * The start indexes of data are specified as arguments. * That of the original inode is orig_page_offset, * and the donor inode is also orig_page_offset * (To easily handle blocksize != pagesize case, the offset for the * donor inode is block unit). * 5:Update holecheck_path and orig_path to points a next proceeding extent, * then returns to step 2. 
* 6:Release holecheck_path and orig_path, and store the number of moved * blocks in moved_len. * The moved_len is useful for the caller to calculate the file offset * for starting the next move extent ioctl. * 7:Return 0 on success, or a negative error value on failure. */ int ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_start, __u64 donor_start, __u64 len, __u64 *moved_len) { struct inode *orig_inode = o_filp->f_dentry->d_inode; struct inode *donor_inode = d_filp->f_dentry->d_inode; struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL; struct ext4_extent *ext_prev, *ext_cur, *ext_dummy; ext4_lblk_t block_start = orig_start; ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0; ext4_lblk_t rest_blocks; pgoff_t orig_page_offset = 0, seq_end_page; int ret1, ret2, depth, last_extent = 0; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; int data_offset_in_page; int block_len_in_page; int uninit; /* orig and donor should be different files */ if (orig_inode->i_ino == donor_inode->i_ino) { ext4_debug("ext4 move extent: The argument files should not " "be the same file [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } /* Regular file check */ if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { ext4_debug("ext4 move extent: The argument files should be " "regular files [ino:orig %lu, donor %lu]\n", orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } /* Protect orig and donor inodes against a truncate */ ret1 = mext_inode_double_lock(orig_inode, donor_inode); if (ret1 < 0) return ret1; /* Protect extent tree against block allocations via delalloc */ double_down_write_data_sem(orig_inode, donor_inode); /* Check whether the files and arguments allow move_extent to proceed */ ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start, donor_start, &len); if (ret1) goto out; file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits; block_end = block_start + len - 1; if (file_end < block_end) len -= block_end - file_end; ret1 = get_ext_path(orig_inode, block_start, &orig_path); if (ret1) goto out; /* Get path structure to check the hole */ ret1 = get_ext_path(orig_inode, block_start, &holecheck_path); if (ret1) goto out; depth = ext_depth(orig_inode); ext_cur = holecheck_path[depth].p_ext; /* * Get the proper starting location of the block replacement if * block_start was within a hole. */ if (le32_to_cpu(ext_cur->ee_block) + ext4_ext_get_actual_len(ext_cur) - 1 < block_start) { /* * The hole exists between extents or at the tail of the * original file. */ last_extent = mext_next_extent(orig_inode, holecheck_path, &ext_cur); if (last_extent < 0) { ret1 = last_extent; goto out; } last_extent = mext_next_extent(orig_inode, orig_path, &ext_dummy); if (last_extent < 0) { ret1 = last_extent; goto out; } seq_start = le32_to_cpu(ext_cur->ee_block); } else if (le32_to_cpu(ext_cur->ee_block) > block_start) /* The hole exists at the beginning of the original file. */ seq_start = le32_to_cpu(ext_cur->ee_block); else seq_start = block_start; /* No blocks within the specified range.
*/ if (le32_to_cpu(ext_cur->ee_block) > block_end) { ext4_debug("ext4 move extent: The specified range of the file " "may be a hole\n"); ret1 = -EINVAL; goto out; } /* Adjust start blocks */ add_blocks = min(le32_to_cpu(ext_cur->ee_block) + ext4_ext_get_actual_len(ext_cur), block_end + 1) - max(le32_to_cpu(ext_cur->ee_block), block_start); while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) { seq_blocks += add_blocks; /* Adjust tail blocks */ if (seq_start + seq_blocks - 1 > block_end) seq_blocks = block_end - seq_start + 1; ext_prev = ext_cur; last_extent = mext_next_extent(orig_inode, holecheck_path, &ext_cur); if (last_extent < 0) { ret1 = last_extent; break; } add_blocks = ext4_ext_get_actual_len(ext_cur); /* * Extend the length of the contiguous area (seq_blocks) * if the extents are contiguous. */ if (ext4_can_extents_be_merged(orig_inode, ext_prev, ext_cur) && block_end >= le32_to_cpu(ext_cur->ee_block) && !last_extent) continue; /* Is the original extent uninitialized? */ uninit = ext4_ext_is_uninitialized(ext_prev); data_offset_in_page = seq_start % blocks_per_page; /* * Calculate the number of data blocks that should be swapped * at the first page. */ if (data_offset_in_page + seq_blocks > blocks_per_page) { /* Swapped blocks are across pages */ block_len_in_page = blocks_per_page - data_offset_in_page; } else { /* Swapped blocks are in a page */ block_len_in_page = seq_blocks; } orig_page_offset = seq_start >> (PAGE_CACHE_SHIFT - orig_inode->i_blkbits); seq_end_page = (seq_start + seq_blocks - 1) >> (PAGE_CACHE_SHIFT - orig_inode->i_blkbits); seq_start = le32_to_cpu(ext_cur->ee_block); rest_blocks = seq_blocks; /* * Up the semaphore to avoid the following problems: * a. transaction deadlock among ext4_journal_start, * ->write_begin via pagefault, and jbd2_journal_commit * b. racing with ->readpage, ->write_begin, and ext4_get_block * in move_extent_per_page */ double_up_write_data_sem(orig_inode, donor_inode); while (orig_page_offset <= seq_end_page) { /* Swap original branches with new branches */ block_len_in_page = move_extent_per_page( o_filp, donor_inode, orig_page_offset, data_offset_in_page, block_len_in_page, uninit, &ret1); /* Count how many blocks we have exchanged */ *moved_len += block_len_in_page; if (ret1 < 0) break; if (*moved_len > len) { EXT4_ERROR_INODE(orig_inode, "We replaced blocks too much!
" "sum of replaced: %llu requested: %llu", *moved_len, len); ret1 = -EIO; break; } orig_page_offset++; data_offset_in_page = 0; rest_blocks -= block_len_in_page; if (rest_blocks > blocks_per_page) block_len_in_page = blocks_per_page; else block_len_in_page = rest_blocks; } double_down_write_data_sem(orig_inode, donor_inode); if (ret1 < 0) break; /* Decrease buffer counter */ if (holecheck_path) ext4_ext_drop_refs(holecheck_path); ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path); if (ret1) break; depth = holecheck_path->p_depth; /* Decrease buffer counter */ if (orig_path) ext4_ext_drop_refs(orig_path); ret1 = get_ext_path(orig_inode, seq_start, &orig_path); if (ret1) break; ext_cur = holecheck_path[depth].p_ext; add_blocks = ext4_ext_get_actual_len(ext_cur); seq_blocks = 0; } out: if (*moved_len) { ext4_discard_preallocations(orig_inode); ext4_discard_preallocations(donor_inode); } if (orig_path) { ext4_ext_drop_refs(orig_path); kfree(orig_path); } if (holecheck_path) { ext4_ext_drop_refs(holecheck_path); kfree(holecheck_path); } double_up_write_data_sem(orig_inode, donor_inode); ret2 = mext_inode_double_unlock(orig_inode, donor_inode); if (ret1) return ret1; else if (ret2) return ret2; return 0; }
gpl-2.0
CyanogenMod/android_kernel_google_steelhead
drivers/media/video/zoran/zr36060.c
3337
30674
/* * Zoran ZR36060 basic configuration functions * * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be> * * $Id: zr36060.c,v 1.1.2.22 2003/05/06 09:35:36 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR060_VERSION "v0.7" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* headerfile of this module */ #include "zr36060.h" /* codec io API */ #include "videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36060_codecs; static int low_bitrate; module_param(low_bitrate, bool, 0); MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate"); /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) \ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36060_read (struct zr36060 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xff; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); //dprintk(4, "%s: reading from 0x%04x: %02x\n",ptr->name,reg,value); return value; } static void zr36060_write(struct zr36060 *ptr, u16 reg, u8 value) { //dprintk(4, "%s: writing 0x%02x to 0x%04x\n",ptr->name,value,reg); dprintk(4, "0x%02x @0x%04x\n", value, reg); // just in case something is wrong... 
if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* ========================================================================= Local helper function: status read ========================================================================= */ /* the status is kept in the data structure */ static u8 zr36060_read_status (struct zr36060 *ptr) { ptr->status = zr36060_read(ptr, ZR060_CFSR); zr36060_read(ptr, 0); return ptr->status; } /* ========================================================================= Local helper function: scale factor read ========================================================================= */ /* the scale factor is kept in the data structure */ static u16 zr36060_read_scalefactor (struct zr36060 *ptr) { ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) | (zr36060_read(ptr, ZR060_SF_LO) & 0xFF); /* leave register 0 selected for an eventual GO from the master */ zr36060_read(ptr, 0); return ptr->scalefact; } /* ========================================================================= Local helper function: wait until the codec is ready to proceed (end of processing) or the timeout expires ========================================================================= */ static void zr36060_wait_end (struct zr36060 *ptr) { int i = 0; while (zr36060_read_status(ptr) & ZR060_CFSR_Busy) { udelay(1); if (i++ > 200000) { // 200ms - something is definitely wrong! dprintk(1, "%s: timeout at wait_end (last status: 0x%02x)\n", ptr->name, ptr->status); break; } } } /* ========================================================================= Local helper function: basic "connectivity" test - checks the device ID registers and the busy flag ========================================================================= */ static int zr36060_basic_test (struct zr36060 *ptr) { if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) && (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36060_wait_end(ptr); if (ptr->status & ZR060_CFSR_Busy) { dprintk(1, KERN_ERR "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); return -EBUSY; } return 0; /* looks good! */ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets ========================================================================= */ static int zr36060_pushit (struct zr36060 *ptr, u16 startreg, u16 len, const char *data) { int i = 0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg, len); while (i < len) { zr36060_write(ptr, startreg++, data[i++]); } return i; } /* ========================================================================= Basic datasets: jpeg baseline setup data (you can find it in lots of places on the internet, or just extract it from any regular .jpg image...) These could be variable, but as long as that's not needed they are kept fixed to save memory. Otherwise, expand the zr36060 structure with arrays, push the values to it and initialize from there, as e.g. the linux zr36057/60 driver does it.
========================================================================= */ static const char zr36060_dqt[0x86] = { 0xff, 0xdb, //Marker: DQT 0x00, 0x84, //Length: 2*65+2 0x00, //Pq,Tq first table 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 0x01, //Pq,Tq second table 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 }; static const char zr36060_dht[0x1a4] = { 0xff, 0xc4, //Marker: DHT 0x01, 0xa2, //Length: 2*AC, 2*DC 0x00, //DC first table 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, //DC second table 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x10, //AC first table 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, //AC second table 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 
0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA }; /* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ #define NO_OF_COMPONENTS 0x3 //Y,U,V #define BASELINE_PRECISION 0x8 //MCU size (?) static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC /* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; /* ========================================================================= Local helper functions: calculation and setup of parameter-dependent JPEG baseline segments (needed for compression only) ========================================================================= */ /* ------------------------------------------------------------------------- */ /* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */ static int zr36060_set_sof (struct zr36060 *ptr) { char sof_data[34]; // max. size of register set int i; dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060 sof_data[5] = (ptr->height) >> 8; sof_data[6] = (ptr->height) & 0xff; sof_data[7] = (ptr->width) >> 8; sof_data[8] = (ptr->width) & 0xff; sof_data[9] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sof_data[10 + (i * 3)] = i; // index identifier sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection } return zr36060_pushit(ptr, ZR060_SOF_IDX, (3 * NO_OF_COMPONENTS) + 10, sof_data); } /* ------------------------------------------------------------------------- */ /* SOS (start of scan) segment depends on the used scan components of each color component */ static int zr36060_set_sos (struct zr36060 *ptr) { char sos_data[16]; // max. size of register set int i; dprintk(3, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; sos_data[4] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sos_data[5 + (i * 2)] = i; // index sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) | zr36060_ta[i]; // AC/DC tbl.sel. } sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f; sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; return zr36060_pushit(ptr, ZR060_SOS_IDX, 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, sos_data); } /* ------------------------------------------------------------------------- */ /* DRI (define restart interval) */ static int zr36060_set_dri (struct zr36060 *ptr) { char dri_data[6]; // max. 
size of register set dprintk(3, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; dri_data[3] = 0x04; dri_data[4] = (ptr->dri) >> 8; dri_data[5] = (ptr->dri) & 0xff; return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data); } /* ========================================================================= Setup function: Setup compression/decompression of Zoran's JPEG processor ( see also zoran 36060 manual ) ... sorry for the spaghetti code ... ========================================================================= */ static void zr36060_init (struct zr36060 *ptr) { int sum = 0; long bitcnt, tmp; if (ptr->mode == CODEC_DO_COMPRESSION) { dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* 060 communicates with 067 in master mode */ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); /* Compression with or without variable scale factor */ /*FIXME: What about ptr->bitrate_ctrl? */ zr36060_write(ptr, ZR060_CMR, ZR060_CMR_Comp | ZR060_CMR_Pass2 | ZR060_CMR_BRB); /* Must be zero */ zr36060_write(ptr, ZR060_MBZ, 0x00); zr36060_write(ptr, ZR060_TCR_HI, 0x00); zr36060_write(ptr, ZR060_TCR_LO, 0x00); /* Disable all IRQs - no DataErr means autoreset */ zr36060_write(ptr, ZR060_IMR, 0); /* volume control settings */ zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8); zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff); zr36060_write(ptr, ZR060_AF_HI, 0xff); zr36060_write(ptr, ZR060_AF_M, 0xff); zr36060_write(ptr, ZR060_AF_LO, 0xff); /* setup the variable jpeg tables */ sum += zr36060_set_sof(ptr); sum += zr36060_set_sos(ptr); sum += zr36060_set_dri(ptr); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ sum += zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt), zr36060_dqt); sum += zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht); zr36060_write(ptr, ZR060_APP_IDX, 0xff); zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + ptr->app.appn); zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00); zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2); sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60, ptr->app.data) + 4; zr36060_write(ptr, ZR060_COM_IDX, 0xff); zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe); zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00); zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2); sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60, ptr->com.data) + 4; /* setup misc. 
data for compression (target code sizes) */ /* size of compressed code to reach without header data */ sum = ptr->real_code_vol - sum; bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; dprintk(3, "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff); bitcnt -= bitcnt >> 7; // bits without stuffing bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", ptr->name, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff); /* JPEG markers to be included in the compressed stream */ zr36060_write(ptr, ZR060_MER, ZR060_MER_DQT | ZR060_MER_DHT | ((ptr->com.len > 0) ? ZR060_MER_Com : 0) | ((ptr->app.len > 0) ? ZR060_MER_App : 0)); /* Setup the Video Frontend */ /* Limit pixel range to 16..235 as per CCIR-601 */ zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); } else { dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* 060 communicates with 067 in master mode */ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); /* Decompression */ zr36060_write(ptr, ZR060_CMR, 0); /* Must be zero */ zr36060_write(ptr, ZR060_MBZ, 0x00); zr36060_write(ptr, ZR060_TCR_HI, 0x00); zr36060_write(ptr, ZR060_TCR_LO, 0x00); /* Disable all IRQs - no DataErr means autoreset */ zr36060_write(ptr, ZR060_IMR, 0); /* setup misc. data for expansion */ zr36060_write(ptr, ZR060_MER, 0); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht); /* Setup the Video Frontend */ //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FIExt); //this doesn't seem right and doesn't work... zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); } /* Load the tables */ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst | ZR060_LOAD_Load); zr36060_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status); if (ptr->status & ZR060_CFSR_Busy) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! 
} } /* ========================================================================= CODEC API FUNCTIONS these functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launch the codec - this should be the last call from the master before starting processing */ static int zr36060_set_mode (struct videocodec *codec, int mode) { struct zr36060 *ptr = (struct zr36060 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36060_init(ptr); return 0; } /* set picture size (norm is ignored as the codec doesn't know about it) */ static int zr36060_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36060 *ptr = (struct zr36060 *) codec->data; u32 reg; int size; dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y and norm for now ... */ ptr->width = cap->width / (cap->decimation & 0xff); ptr->height = cap->height / (cap->decimation >> 8); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* Note that VSPol/HSPol bits in zr36060 have the opposite * meaning of their zr360x7 counterparts with the same names * N.b. for VSPol this is only true if FIVEdge = 0 (default, * left unchanged here - in accordance with datasheet). */ reg = (!pol->vsync_pol ? ZR060_VPR_VSPol : 0) | (!pol->hsync_pol ? ZR060_VPR_HSPol : 0) | (pol->field_pol ? ZR060_VPR_FIPol : 0) | (pol->blank_pol ? ZR060_VPR_BLPol : 0) | (pol->subimg_pol ? ZR060_VPR_SImgPol : 0) | (pol->poe_pol ? ZR060_VPR_PoePol : 0) | (pol->pvalid_pol ? ZR060_VPR_PValPol : 0) | (pol->vclk_pol ? ZR060_VPR_VCLKPol : 0); zr36060_write(ptr, ZR060_VPR, reg); reg = 0; switch (cap->decimation & 0xff) { default: case 1: break; case 2: reg |= ZR060_SR_HScale2; break; case 4: reg |= ZR060_SR_HScale4; break; } switch (cap->decimation >> 8) { default: case 1: break; case 2: reg |= ZR060_SR_VScale; break; } zr36060_write(ptr, ZR060_SR, reg); zr36060_write(ptr, ZR060_BCR_Y, 0x00); zr36060_write(ptr, ZR060_BCR_U, 0x80); zr36060_write(ptr, ZR060_BCR_V, 0x80); /* sync generator */ reg = norm->Ht - 1; /* Vtotal */ zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff); reg = norm->Wt - 1; /* Htotal */ zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff); reg = 6 - 1; /* VsyncSize */ zr36060_write(ptr, ZR060_SGR_VSYNC, reg); //reg = 30 - 1; /* HsyncSize */ ///*CP*/ reg = (zr->params.norm == 1 ?
57 : 68); reg = 68; zr36060_write(ptr, ZR060_SGR_HSYNC, reg); reg = norm->VStart - 1; /* BVstart */ zr36060_write(ptr, ZR060_SGR_BVSTART, reg); reg += norm->Ha / 2; /* BVend */ zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff); reg = norm->HStart - 1; /* BHstart */ zr36060_write(ptr, ZR060_SGR_BHSTART, reg); reg += norm->Wa; /* BHend */ zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff); /* active area */ reg = cap->y + norm->VStart; /* Vstart */ zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff); reg += cap->height; /* Vend */ zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff); reg = cap->x + norm->HStart; /* Hstart */ zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff); reg += cap->width; /* Hend */ zr36060_write(ptr, ZR060_AAR_HEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff); /* subimage area */ reg = norm->VStart - 4; /* SVstart */ zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff); reg += norm->Ha / 2 + 8; /* SVend */ zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff); reg = norm->HStart /*+ 64 */ - 4; /* SHstart */ zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff); reg += norm->Wa + 8; /* SHend */ zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff); size = ptr->width * ptr->height; /* Target compressed field size in bits: */ size = size * 16; /* uncompressed size in bits */ /* (Ronald) by default, quality = 100 is a compression * ratio 1:2. Setting low_bitrate (insmod option) sets * it to 1:4 (instead of 1:2, zr36060 max) as limit because the * buz can't handle more at decimation=1... Use low_bitrate if * you have a Buz, unless you know what you're doing */ size = size * cap->quality / (low_bitrate ? 400 : 200); /* Lower limit (arbitrary, 1 KB) */ if (size < 8192) size = 8192; /* Upper limit: 7/8 of the code buffers */ if (size > ptr->total_code_vol * 7) size = ptr->total_code_vol * 7; ptr->real_code_vol = size >> 3; /* in bytes */ /* the MBCVR is the *maximum* block volume, according to the * JPEG ISO specs, this shouldn't be used, since that allows * for the best encoding quality. 
So set it to its max value */ reg = ptr->max_block_vol; zr36060_write(ptr, ZR060_MBCVR, reg); return 0; } /* additional control functions */ static int zr36060_control (struct videocodec *codec, int type, int size, void *data) { struct zr36060 *ptr = (struct zr36060 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d bytes\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status */ if (size != sizeof(int)) return -EFAULT; zr36060_read_status(ptr); *ival = ptr->status; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = CODEC_MODE_BJPG; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != CODEC_MODE_BJPG) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: /* not needed, do nothing */ return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; case CODEC_G_JPEG_TDS_BYTE: /* get target volume in bytes */ if (size != sizeof(int)) return -EFAULT; *ival = ptr->total_code_vol; break; case CODEC_S_JPEG_TDS_BYTE: /* set target volume in bytes */ if (size != sizeof(int)) return -EFAULT; ptr->total_code_vol = *ival; ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; break; case CODEC_G_JPEG_SCALE: /* get scaling factor */ if (size != sizeof(int)) return -EFAULT; *ival = zr36060_read_scalefactor(ptr); break; case CODEC_S_JPEG_SCALE: /* set scaling factor */ if (size != sizeof(int)) return -EFAULT; ptr->scalefact = *ival; break; case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; *app = ptr->app; break; } case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; ptr->app = *app; break; } case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; *com = ptr->com; break; } case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; ptr->com = *com; break; } default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36060_unset (struct videocodec *codec) { struct zr36060 *ptr = codec->data; if (ptr) { /* do we need some codec deinit here, too? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36060_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.)
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36060_setup (struct videocodec *codec) { struct zr36060 *ptr; int res; dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", zr36060_codecs); if (zr36060_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36060: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]", zr36060_codecs); ptr->num = zr36060_codecs++; ptr->codec = codec; //testing res = zr36060_basic_test(ptr); if (res < 0) { zr36060_unset(codec); return res; } //final setup memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8); memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8); ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag * (what is the difference?) */ ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 384; ptr->height = 288; ptr->total_code_vol = 16000; /* CHECKME */ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */ ptr->scalefact = 0x100; ptr->dri = 1; /* CHECKME, was 8 is 1 */ /* by default, no COM or APP markers - app should set those */ ptr->com.len = 0; ptr->app.appn = 0; ptr->app.len = 0; zr36060_init(ptr); dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name); return 0; } static const struct videocodec zr36060_codec = { .owner = THIS_MODULE, .name = "zr36060", .magic = 0L, // magic not used .flags = CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER | CODEC_FLAG_VFE, .type = CODEC_TYPE_ZR36060, .setup = zr36060_setup, // functionality .unset = zr36060_unset, .set_mode = zr36060_set_mode, .set_video = zr36060_set_video, .control = zr36060_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36060_init_module (void) { //dprintk(1, "zr36060 driver %s\n",ZR060_VERSION); zr36060_codecs = 0; return videocodec_register(&zr36060_codec); } static void __exit zr36060_cleanup_module (void) { if (zr36060_codecs) { dprintk(1, "zr36060: something's wrong - %d codecs left somehow.\n", zr36060_codecs); } /* however, we can't just stay alive */ videocodec_unregister(&zr36060_codec); } module_init(zr36060_init_module); module_exit(zr36060_cleanup_module); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@skynet.be>"); MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " ZR060_VERSION); MODULE_LICENSE("GPL");
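A note on the bitrate arithmetic in zr36060_set_video above: the driver derives the target compressed field size from the pixel count (16 bits per pixel at 4:2:2), scales it by the requested quality, and clamps it between 1 KB and 7/8 of the code buffer. The sketch below restates just that arithmetic as a pure, runnable userspace function; the function name and signature are mine, not part of the driver.

#include <stdio.h>

/* Returns the target compressed size of one field in bytes.
 * quality is 0..100; total_code_vol is the code buffer size in bytes. */
static unsigned long target_code_vol(unsigned int width, unsigned int height,
                                     unsigned int quality,
                                     unsigned long total_code_vol,
                                     int low_bitrate)
{
	unsigned long size = (unsigned long)width * height * 16; /* bits, 4:2:2 */

	/* quality 100 means 1:2 compression, or 1:4 with low_bitrate set */
	size = size * quality / (low_bitrate ? 400 : 200);

	if (size < 8192)                 /* lower limit: 1 KB, expressed in bits */
		size = 8192;
	if (size > total_code_vol * 7)   /* upper limit: 7/8 of the code buffers */
		size = total_code_vol * 7;

	return size >> 3;                /* bits -> bytes */
}

int main(void)
{
	/* a PAL field at quality 50 with the driver's default 16000-byte buffer */
	printf("%lu bytes\n", target_code_vol(720, 288, 50, 16000, 0));
	return 0;
}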
gpl-2.0
sch2307/android_kernel_ezboard_s100
arch/s390/kvm/sigp.c
3337
7978
/* * sigp.c - handling interprocessor communication * * Copyright IBM Corp. 2008,2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. * * Author(s): Carsten Otte <cotte@de.ibm.com> * Christian Borntraeger <borntraeger@de.ibm.com> * Christian Ehrhardt <ehrhardt@de.ibm.com> */ #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/slab.h> #include "gaccess.h" #include "kvm-s390.h" /* sigp order codes */ #define SIGP_SENSE 0x01 #define SIGP_EXTERNAL_CALL 0x02 #define SIGP_EMERGENCY 0x03 #define SIGP_START 0x04 #define SIGP_STOP 0x05 #define SIGP_RESTART 0x06 #define SIGP_STOP_STORE_STATUS 0x09 #define SIGP_INITIAL_CPU_RESET 0x0b #define SIGP_CPU_RESET 0x0c #define SIGP_SET_PREFIX 0x0d #define SIGP_STORE_STATUS_ADDR 0x0e #define SIGP_SET_ARCH 0x12 /* cpu status bits */ #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL #define SIGP_STAT_INCORRECT_STATE 0x00000200UL #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL #define SIGP_STAT_STOPPED 0x00000040UL #define SIGP_STAT_OPERATOR_INTERV 0x00000020UL #define SIGP_STAT_CHECK_STOP 0x00000010UL #define SIGP_STAT_INOPERATIVE 0x00000004UL #define SIGP_STAT_INVALID_ORDER 0x00000002UL #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, unsigned long *reg) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; int rc; if (cpu_addr >= KVM_MAX_VCPUS) return 3; /* not operational */ spin_lock(&fi->lock); if (fi->local_int[cpu_addr] == NULL) rc = 3; /* not operational */ else if (atomic_read(fi->local_int[cpu_addr]->cpuflags) & CPUSTAT_RUNNING) { *reg &= 0xffffffff00000000UL; rc = 1; /* status stored */ } else { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STAT_STOPPED; rc = 1; /* status stored */ } spin_unlock(&fi->lock); VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc); return rc; } static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li; struct kvm_s390_interrupt_info *inti; int rc; if (cpu_addr >= KVM_MAX_VCPUS) return 3; /* not operational */ inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) return -ENOMEM; inti->type = KVM_S390_INT_EMERGENCY; spin_lock(&fi->lock); li = fi->local_int[cpu_addr]; if (li == NULL) { rc = 3; /* not operational */ kfree(inti); goto unlock; } spin_lock_bh(&li->lock); list_add_tail(&inti->list, &li->list); atomic_set(&li->active, 1); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); spin_unlock_bh(&li->lock); rc = 0; /* order accepted */ unlock: spin_unlock(&fi->lock); VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr); return rc; } static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) { struct kvm_s390_interrupt_info *inti; inti = kzalloc(sizeof(*inti), GFP_ATOMIC); if (!inti) return -ENOMEM; inti->type = KVM_S390_SIGP_STOP; spin_lock_bh(&li->lock); list_add_tail(&inti->list, &li->list); atomic_set(&li->active, 1); atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); li->action_bits |= action; if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); spin_unlock_bh(&li->lock); return 0; /* order accepted */ } static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) { struct kvm_s390_float_interrupt *fi = 
&vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li; int rc; if (cpu_addr >= KVM_MAX_VCPUS) return 3; /* not operational */ spin_lock(&fi->lock); li = fi->local_int[cpu_addr]; if (li == NULL) { rc = 3; /* not operational */ goto unlock; } rc = __inject_sigp_stop(li, action); unlock: spin_unlock(&fi->lock); VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); return rc; } int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; return __inject_sigp_stop(li, action); } static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) { int rc; switch (parameter & 0xff) { case 0: rc = 3; /* not operational */ break; case 1: case 2: rc = 0; /* order accepted */ break; default: rc = -EOPNOTSUPP; } return rc; } static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, unsigned long *reg) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; struct kvm_s390_local_interrupt *li = NULL; struct kvm_s390_interrupt_info *inti; int rc; u8 tmp; /* make sure that the new value is valid memory */ address = address & 0x7fffe000u; if ((copy_from_user(&tmp, (void __user *) (address + vcpu->arch.sie_block->gmsor) , 1)) || (copy_from_user(&tmp, (void __user *)(address + vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { *reg |= SIGP_STAT_INVALID_PARAMETER; return 1; /* invalid parameter */ } inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) return 2; /* busy */ spin_lock(&fi->lock); if (cpu_addr < KVM_MAX_VCPUS) li = fi->local_int[cpu_addr]; if (li == NULL) { rc = 1; /* incorrect state */ *reg &= SIGP_STAT_INCORRECT_STATE; kfree(inti); goto out_fi; } spin_lock_bh(&li->lock); /* cpu must be in stopped state */ if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { rc = 1; /* incorrect state */ *reg &= SIGP_STAT_INCORRECT_STATE; kfree(inti); goto out_li; } inti->type = KVM_S390_SIGP_SET_PREFIX; inti->prefix.address = address; list_add_tail(&inti->list, &li->list); atomic_set(&li->active, 1); if (waitqueue_active(&li->wq)) wake_up_interruptible(&li->wq); rc = 0; /* order accepted */ VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); out_li: spin_unlock_bh(&li->lock); out_fi: spin_unlock(&fi->lock); return rc; } int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) { int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int r3 = vcpu->arch.sie_block->ipa & 0x000f; int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u32 parameter; u16 cpu_addr = vcpu->arch.guest_gprs[r3]; u8 order_code; int rc; /* sigp in userspace can exit */ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); order_code = disp2; if (base2) order_code += vcpu->arch.guest_gprs[base2]; if (r1 % 2) parameter = vcpu->arch.guest_gprs[r1]; else parameter = vcpu->arch.guest_gprs[r1 + 1]; switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; rc = __sigp_sense(vcpu, cpu_addr, &vcpu->arch.guest_gprs[r1]); break; case SIGP_EMERGENCY: vcpu->stat.instruction_sigp_emergency++; rc = __sigp_emergency(vcpu, cpu_addr); break; case SIGP_STOP: vcpu->stat.instruction_sigp_stop++; rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP); break; case SIGP_STOP_STORE_STATUS: vcpu->stat.instruction_sigp_stop++; rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP); break; case SIGP_SET_ARCH: vcpu->stat.instruction_sigp_arch++; rc = __sigp_set_arch(vcpu, parameter); break; case SIGP_SET_PREFIX: 
vcpu->stat.instruction_sigp_prefix++; rc = __sigp_set_prefix(vcpu, cpu_addr, parameter, &vcpu->arch.guest_gprs[r1]); break; case SIGP_RESTART: vcpu->stat.instruction_sigp_restart++; /* user space must know about restart */ default: return -EOPNOTSUPP; } if (rc < 0) return rc; vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44; return 0; }
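A note on the decode step at the top of kvm_s390_handle_sigp above: the register numbers come from the ipa halfword, the order code from the base/displacement fields of ipb, and the parameter from the odd register of the r1 pair. The sketch below restates only that decoding as a standalone, runnable userspace function; the struct and names are hypothetical, not KVM code.

#include <stdint.h>
#include <stdio.h>

struct sigp_decode {
	uint8_t  order_code;
	uint16_t cpu_addr;
	uint32_t parameter;
};

static struct sigp_decode decode_sigp(uint16_t ipa, uint32_t ipb,
                                      const uint64_t gprs[16])
{
	struct sigp_decode d;
	int r1    = (ipa & 0x00f0) >> 4;   /* even/odd register pair */
	int r3    = ipa & 0x000f;
	int base2 = ipb >> 28;
	int disp2 = (ipb & 0x0fff0000) >> 16;

	d.order_code = disp2 + (base2 ? gprs[base2] : 0);
	d.cpu_addr   = gprs[r3];
	/* the parameter lives in the odd register of the r1 pair */
	d.parameter  = (r1 % 2) ? gprs[r1] : gprs[r1 + 1];
	return d;
}

int main(void)
{
	uint64_t gprs[16] = { 0 };

	gprs[3] = 1;  /* target cpu address in r3 */
	/* ipa 0x0023: r1 = 2, r3 = 3; ipb: base2 = 0, disp2 = 0x01 (SIGP_SENSE) */
	struct sigp_decode d = decode_sigp(0x0023, 0x00010000, gprs);
	printf("order %#x for cpu %u\n", d.order_code, d.cpu_addr);
	return 0;
}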
gpl-2.0
Validus-Kernel/android_kernel_oneplus_msm8974
net/core/iovec.c
3849
5763
/* * iovec manipulation routines. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Andrew Lunn : Errors in iovec copying. * Pedro Roque : Added memcpy_fromiovecend and * csum_..._fromiovecend. * Andi Kleen : fixed error handling for 2.1 * Alexey Kuznetsov: 2.1 optimisations * Andi Kleen : Fix csum*fromiovecend for IPv6. */ #include <linux/errno.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/net.h> #include <linux/in6.h> #include <asm/uaccess.h> #include <asm/byteorder.h> #include <net/checksum.h> #include <net/sock.h> /* * Verify iovec. The caller must ensure that the iovec is big enough * to hold the message iovec. * * Save time not doing access_ok. copy_*_user will make this work * in any case. */ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) { int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { void __user *namep; namep = (void __user __force *) m->msg_name; err = move_addr_to_kernel(namep, m->msg_namelen, address); if (err < 0) return err; } m->msg_name = address; } else { m->msg_name = NULL; } size = m->msg_iovlen * sizeof(struct iovec); if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) return -EFAULT; m->msg_iov = iov; err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { size_t len = iov[ct].iov_len; if (len > INT_MAX - err) { len = INT_MAX - err; iov[ct].iov_len = len; } err += len; } return err; } /* * Copy kernel to iovec. Returns -EFAULT on error. * * Note: this modifies the original iovec. */ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len) { while (len > 0) { if (iov->iov_len) { int copy = min_t(unsigned int, iov->iov_len, len); if (copy_to_user(iov->iov_base, kdata, copy)) return -EFAULT; kdata += copy; len -= copy; iov->iov_len -= copy; iov->iov_base += copy; } iov++; } return 0; } EXPORT_SYMBOL(memcpy_toiovec); /* * Copy kernel to iovec. Returns -EFAULT on error. */ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, int offset, int len) { int copy; for (; len > 0; ++iov) { /* Skip over the finished iovecs */ if (unlikely(offset >= iov->iov_len)) { offset -= iov->iov_len; continue; } copy = min_t(unsigned int, iov->iov_len - offset, len); if (copy_to_user(iov->iov_base + offset, kdata, copy)) return -EFAULT; offset = 0; kdata += copy; len -= copy; } return 0; } EXPORT_SYMBOL(memcpy_toiovecend); /* * Copy iovec to kernel. Returns -EFAULT on error. * * Note: this modifies the original iovec. */ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) { while (len > 0) { if (iov->iov_len) { int copy = min_t(unsigned int, len, iov->iov_len); if (copy_from_user(kdata, iov->iov_base, copy)) return -EFAULT; len -= copy; kdata += copy; iov->iov_base += copy; iov->iov_len -= copy; } iov++; } return 0; } EXPORT_SYMBOL(memcpy_fromiovec); /* * Copy iovec from kernel. Returns -EFAULT on error. 
*/ int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len) { /* Skip over the finished iovecs */ while (offset >= iov->iov_len) { offset -= iov->iov_len; iov++; } while (len > 0) { u8 __user *base = iov->iov_base + offset; int copy = min_t(unsigned int, len, iov->iov_len - offset); offset = 0; if (copy_from_user(kdata, base, copy)) return -EFAULT; len -= copy; kdata += copy; iov++; } return 0; } EXPORT_SYMBOL(memcpy_fromiovecend); /* * And now for the all-in-one: copy and checksum from a user iovec * directly to a datagram * Calls to csum_partial but the last must be in 32 bit chunks * * ip_build_xmit must ensure that when fragmenting only the last * call to this function will be unaligned also. */ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, unsigned int len, __wsum *csump) { __wsum csum = *csump; int partial_cnt = 0, err = 0; /* Skip over the finished iovecs */ while (offset >= iov->iov_len) { offset -= iov->iov_len; iov++; } while (len > 0) { u8 __user *base = iov->iov_base + offset; int copy = min_t(unsigned int, len, iov->iov_len - offset); offset = 0; /* There is a remnant from previous iov. */ if (partial_cnt) { int par_len = 4 - partial_cnt; /* iov component is too short ... */ if (par_len > copy) { if (copy_from_user(kdata, base, copy)) goto out_fault; kdata += copy; base += copy; partial_cnt += copy; len -= copy; iov++; if (len) continue; *csump = csum_partial(kdata - partial_cnt, partial_cnt, csum); goto out; } if (copy_from_user(kdata, base, par_len)) goto out_fault; csum = csum_partial(kdata - partial_cnt, 4, csum); kdata += par_len; base += par_len; copy -= par_len; len -= par_len; partial_cnt = 0; } if (len > copy) { partial_cnt = copy % 4; if (partial_cnt) { copy -= partial_cnt; if (copy_from_user(kdata + copy, base + copy, partial_cnt)) goto out_fault; } } if (copy) { csum = csum_and_copy_from_user(base, kdata, copy, csum, &err); if (err) goto out; } len -= copy + partial_cnt; kdata += copy + partial_cnt; iov++; } *csump = csum; out: return err; out_fault: err = -EFAULT; goto out; } EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
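A note on the iteration pattern shared by memcpy_fromiovecend and csum_partial_copy_fromiovecend above: first skip whole iovec entries until the offset lands inside one, then copy piecewise, resetting the offset to zero after the first segment. The sketch below is an illustrative userspace analog using the <sys/uio.h> struct iovec, with plain memcpy standing in for copy_from_user; the helper name is mine.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy len bytes out of iov, starting offset bytes into the sequence.
 * The caller must guarantee that the iovec covers offset + len bytes. */
static void copy_from_iovecend(unsigned char *kdata, const struct iovec *iov,
                               size_t offset, size_t len)
{
	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}
	while (len > 0) {
		unsigned char *base = (unsigned char *)iov->iov_base + offset;
		size_t copy = iov->iov_len - offset;

		if (copy > len)
			copy = len;
		offset = 0;  /* only the first segment has a partial start */
		memcpy(kdata, base, copy);
		len -= copy;
		kdata += copy;
		iov++;
	}
}

int main(void)
{
	char a[] = "hello ", b[] = "world";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = 6 },
		{ .iov_base = b, .iov_len = 5 },
	};
	unsigned char out[8] = { 0 };

	copy_from_iovecend(out, iov, 4, 7);  /* copies "o world" */
	printf("%.7s\n", (char *)out);
	return 0;
}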
gpl-2.0
Blefish/android_kernel_qcom_msm7x30
arch/um/kernel/exec.c
4361
1201
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/stddef.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/current.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <as-layout.h> #include <mem_user.h> #include <skas.h> #include <os.h> void flush_thread(void) { void *data = NULL; int ret; arch_flush_thread(&current->thread.arch); ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data); ret = ret || unmap(&current->mm->context.id, STUB_END, host_task_size - STUB_END, 1, &data); if (ret) { printk(KERN_ERR "flush_thread - clearing address space failed, " "err = %d\n", ret); force_sig(SIGKILL, current); } get_safe_registers(current_pt_regs()->regs.gp, current_pt_regs()->regs.fp); __switch_mm(&current->mm->context.id); } void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) { PT_REGS_IP(regs) = eip; PT_REGS_SP(regs) = esp; current->ptrace &= ~PT_DTRACE; #ifdef SUBARCH_EXECVE1 SUBARCH_EXECVE1(regs->regs); #endif } EXPORT_SYMBOL(start_thread);
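A note on start_thread above: after exec, the task is redirected simply by pointing the saved registers at a new instruction pointer and stack and letting it resume. The sketch below shows a portable userspace analog of that "aim the context, then resume it" step using makecontext/swapcontext; it is a hypothetical demo of the concept, not UML code.

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, entry_ctx;

static void entry_point(void)
{
	printf("running at the new ip, on the new stack\n");
	/* returning resumes main_ctx via uc_link */
}

int main(void)
{
	char *stack = malloc(64 * 1024);

	if (!stack)
		return 1;
	getcontext(&entry_ctx);
	entry_ctx.uc_stack.ss_sp = stack;        /* the new "esp" */
	entry_ctx.uc_stack.ss_size = 64 * 1024;
	entry_ctx.uc_link = &main_ctx;
	makecontext(&entry_ctx, entry_point, 0); /* the new "eip" */

	swapcontext(&main_ctx, &entry_ctx);      /* hand control over */
	free(stack);
	return 0;
}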
gpl-2.0
htc-mirror/ville-u-ics-3.0.8-e2a40ab
drivers/infiniband/core/packer.c
4873
6451
/* * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <rdma/ib_pack.h> static u64 value_read(int offset, int size, void *structure) { switch (size) { case 1: return *(u8 *) (structure + offset); case 2: return be16_to_cpup((__be16 *) (structure + offset)); case 4: return be32_to_cpup((__be32 *) (structure + offset)); case 8: return be64_to_cpup((__be64 *) (structure + offset)); default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); return 0; } } /** * ib_pack - Pack a structure into a buffer * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @structure:Structure to pack from * @buf:Buffer to pack into * * ib_pack() packs a list of structure fields into a buffer, * controlled by the array of fields in @desc. 
*/ void ib_pack(const struct ib_field *desc, int desc_len, void *structure, void *buf) { int i; for (i = 0; i < desc_len; ++i) { if (desc[i].size_bits <= 32) { int shift; u32 val; __be32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift); addr = (__be32 *) buf + desc[i].offset_words; *addr = (*addr & ~mask) | (cpu_to_be32(val) & mask); } else if (desc[i].size_bits <= 64) { int shift; u64 val; __be64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; if (desc[i].struct_size_bytes) val = value_read(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, structure) << shift; else val = 0; mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift); addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); *addr = (*addr & ~mask) | (cpu_to_be64(val) & mask); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } if (desc[i].struct_size_bytes) memcpy(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, structure + desc[i].struct_offset_bytes, desc[i].size_bits / 8); else memset(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, 0, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_pack); static void value_write(int offset, int size, u64 val, void *structure) { switch (size * 8) { case 8: *( u8 *) (structure + offset) = val; break; case 16: *(__be16 *) (structure + offset) = cpu_to_be16(val); break; case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break; case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break; default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); } } /** * ib_unpack - Unpack a buffer into a structure * @desc:Array of structure field descriptions * @desc_len:Number of entries in @desc * @buf:Buffer to unpack from * @structure:Structure to unpack into * * ib_unpack() unpacks a list of structure fields from a buffer, * controlled by the array of fields in @desc. */ void ib_unpack(const struct ib_field *desc, int desc_len, void *buf, void *structure) { int i; for (i = 0; i < desc_len; ++i) { if (!desc[i].struct_size_bytes) continue; if (desc[i].size_bits <= 32) { int shift; u32 val; u32 mask; __be32 *addr; shift = 32 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be32 *) buf + desc[i].offset_words; val = (be32_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else if (desc[i].size_bits <= 64) { int shift; u64 val; u64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; mask = (~0ull >> (64 - desc[i].size_bits)) << shift; addr = (__be64 *) buf + desc[i].offset_words; val = (be64_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } memcpy(structure + desc[i].struct_offset_bytes, buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, desc[i].size_bits / 8); } } } EXPORT_SYMBOL(ib_unpack);
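A note on the <= 32-bit path of ib_pack above: each field is inserted by shifting the value into position and doing a masked read-modify-write on a big-endian word, so neighbouring fields are left untouched. The sketch below isolates that single step as a standalone, runnable userspace helper; the names are mine, not part of the ib_pack API.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl/ntohl for the endian conversions */

/* Insert val into a big-endian 32-bit word at the given bit offset and
 * width, counting offsets from the most significant bit as ib_pack does. */
static void pack_field_be32(uint32_t *word_be, int offset_bits,
                            int size_bits, uint32_t val)
{
	int shift = 32 - offset_bits - size_bits;
	uint32_t mask = (uint32_t)(((1ull << size_bits) - 1) << shift);
	uint32_t host = ntohl(*word_be);

	host = (host & ~mask) | ((val << shift) & mask);
	*word_be = htonl(host);
}

int main(void)
{
	uint32_t word = 0;

	pack_field_be32(&word, 0, 4, 0x6);  /* high nibble */
	pack_field_be32(&word, 4, 4, 0x1);  /* next nibble; high bits kept */
	printf("0x%08x\n", (unsigned int)ntohl(word));  /* prints 0x61000000 */
	return 0;
}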
gpl-2.0
ankurmittal/CSE-506---Operating-Systems
drivers/infiniband/core/cache.c
5641
10349
/* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <rdma/ib_cache.h> #include "core_priv.h" struct ib_pkey_cache { int table_len; u16 table[0]; }; struct ib_gid_cache { int table_len; union ib_gid table[0]; }; struct ib_update_work { struct work_struct work; struct ib_device *device; u8 port_num; }; static inline int start_port(struct ib_device *device) { return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; } static inline int end_port(struct ib_device *device) { return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
0 : device->phys_port_cnt; } int ib_get_cached_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid) { struct ib_gid_cache *cache; unsigned long flags; int ret = 0; if (port_num < start_port(device) || port_num > end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); cache = device->cache.gid_cache[port_num - start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; else *gid = cache->table[index]; read_unlock_irqrestore(&device->cache.lock, flags); return ret; } EXPORT_SYMBOL(ib_get_cached_gid); int ib_find_cached_gid(struct ib_device *device, union ib_gid *gid, u8 *port_num, u16 *index) { struct ib_gid_cache *cache; unsigned long flags; int p, i; int ret = -ENOENT; *port_num = -1; if (index) *index = -1; read_lock_irqsave(&device->cache.lock, flags); for (p = 0; p <= end_port(device) - start_port(device); ++p) { cache = device->cache.gid_cache[p]; for (i = 0; i < cache->table_len; ++i) { if (!memcmp(gid, &cache->table[i], sizeof *gid)) { *port_num = p + start_port(device); if (index) *index = i; ret = 0; goto found; } } } found: read_unlock_irqrestore(&device->cache.lock, flags); return ret; } EXPORT_SYMBOL(ib_find_cached_gid); int ib_get_cached_pkey(struct ib_device *device, u8 port_num, int index, u16 *pkey) { struct ib_pkey_cache *cache; unsigned long flags; int ret = 0; if (port_num < start_port(device) || port_num > end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); cache = device->cache.pkey_cache[port_num - start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; else *pkey = cache->table[index]; read_unlock_irqrestore(&device->cache.lock, flags); return ret; } EXPORT_SYMBOL(ib_get_cached_pkey); int ib_find_cached_pkey(struct ib_device *device, u8 port_num, u16 pkey, u16 *index) { struct ib_pkey_cache *cache; unsigned long flags; int i; int ret = -ENOENT; if (port_num < start_port(device) || port_num > end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); cache = device->cache.pkey_cache[port_num - start_port(device)]; *index = -1; for (i = 0; i < cache->table_len; ++i) if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { *index = i; ret = 0; break; } read_unlock_irqrestore(&device->cache.lock, flags); return ret; } EXPORT_SYMBOL(ib_find_cached_pkey); int ib_get_cached_lmc(struct ib_device *device, u8 port_num, u8 *lmc) { unsigned long flags; int ret = 0; if (port_num < start_port(device) || port_num > end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); *lmc = device->cache.lmc_cache[port_num - start_port(device)]; read_unlock_irqrestore(&device->cache.lock, flags); return ret; } EXPORT_SYMBOL(ib_get_cached_lmc); static void ib_cache_update(struct ib_device *device, u8 port) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; struct ib_gid_cache *gid_cache = NULL, *old_gid_cache; int i; int ret; tprops = kmalloc(sizeof *tprops, GFP_KERNEL); if (!tprops) return; ret = ib_query_port(device, port, tprops); if (ret) { printk(KERN_WARNING "ib_query_port failed (%d) for %s\n", ret, device->name); goto err; } pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * sizeof *pkey_cache->table, GFP_KERNEL); if (!pkey_cache) goto err; pkey_cache->table_len = tprops->pkey_tbl_len; gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len * sizeof *gid_cache->table, GFP_KERNEL); if (!gid_cache) goto err; gid_cache->table_len = tprops->gid_tbl_len; for (i = 0; i < 
pkey_cache->table_len; ++i) { ret = ib_query_pkey(device, port, i, pkey_cache->table + i); if (ret) { printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n", ret, device->name, i); goto err; } } for (i = 0; i < gid_cache->table_len; ++i) { ret = ib_query_gid(device, port, i, gid_cache->table + i); if (ret) { printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n", ret, device->name, i); goto err; } } write_lock_irq(&device->cache.lock); old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; old_gid_cache = device->cache.gid_cache [port - start_port(device)]; device->cache.pkey_cache[port - start_port(device)] = pkey_cache; device->cache.gid_cache [port - start_port(device)] = gid_cache; device->cache.lmc_cache[port - start_port(device)] = tprops->lmc; write_unlock_irq(&device->cache.lock); kfree(old_pkey_cache); kfree(old_gid_cache); kfree(tprops); return; err: kfree(pkey_cache); kfree(gid_cache); kfree(tprops); } static void ib_cache_task(struct work_struct *_work) { struct ib_update_work *work = container_of(_work, struct ib_update_work, work); ib_cache_update(work->device, work->port_num); kfree(work); } static void ib_cache_event(struct ib_event_handler *handler, struct ib_event *event) { struct ib_update_work *work; if (event->event == IB_EVENT_PORT_ERR || event->event == IB_EVENT_PORT_ACTIVE || event->event == IB_EVENT_LID_CHANGE || event->event == IB_EVENT_PKEY_CHANGE || event->event == IB_EVENT_SM_CHANGE || event->event == IB_EVENT_CLIENT_REREGISTER || event->event == IB_EVENT_GID_CHANGE) { work = kmalloc(sizeof *work, GFP_ATOMIC); if (work) { INIT_WORK(&work->work, ib_cache_task); work->device = event->device; work->port_num = event->element.port_num; queue_work(ib_wq, &work->work); } } } static void ib_cache_setup_one(struct ib_device *device) { int p; rwlock_init(&device->cache.lock); device->cache.pkey_cache = kmalloc(sizeof *device->cache.pkey_cache * (end_port(device) - start_port(device) + 1), GFP_KERNEL); device->cache.gid_cache = kmalloc(sizeof *device->cache.gid_cache * (end_port(device) - start_port(device) + 1), GFP_KERNEL); device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * (end_port(device) - start_port(device) + 1), GFP_KERNEL); if (!device->cache.pkey_cache || !device->cache.gid_cache || !device->cache.lmc_cache) { printk(KERN_WARNING "Couldn't allocate cache " "for %s\n", device->name); goto err; } for (p = 0; p <= end_port(device) - start_port(device); ++p) { device->cache.pkey_cache[p] = NULL; device->cache.gid_cache [p] = NULL; ib_cache_update(device, p + start_port(device)); } INIT_IB_EVENT_HANDLER(&device->cache.event_handler, device, ib_cache_event); if (ib_register_event_handler(&device->cache.event_handler)) goto err_cache; return; err_cache: for (p = 0; p <= end_port(device) - start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } err: kfree(device->cache.pkey_cache); kfree(device->cache.gid_cache); kfree(device->cache.lmc_cache); } static void ib_cache_cleanup_one(struct ib_device *device) { int p; ib_unregister_event_handler(&device->cache.event_handler); flush_workqueue(ib_wq); for (p = 0; p <= end_port(device) - start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } kfree(device->cache.pkey_cache); kfree(device->cache.gid_cache); kfree(device->cache.lmc_cache); } static struct ib_client cache_client = { .name = "cache", .add = ib_cache_setup_one, .remove = ib_cache_cleanup_one }; int __init 
ib_cache_setup(void) { return ib_register_client(&cache_client); } void __exit ib_cache_cleanup(void) { ib_unregister_client(&cache_client); }
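A hypothetical consumer sketch, not from the source: reading the first GID and locating the default P_Key through the cache instead of issuing ib_query_gid()/ib_query_pkey() MADs. The device pointer and port number are assumed valid by the caller.

#include <rdma/ib_cache.h>

static void demo_cache_lookup(struct ib_device *device, u8 port)
{
	union ib_gid gid;
	u16 pkey_index;

	if (!ib_get_cached_gid(device, port, 0, &gid))
		printk(KERN_INFO "%s port %d subnet prefix %llx\n",
		       device->name, port,
		       (unsigned long long) be64_to_cpu(gid.global.subnet_prefix));

	/* 0xffff is the default full-membership P_Key */
	if (!ib_find_cached_pkey(device, port, 0xffff, &pkey_index))
		printk(KERN_INFO "%s: default P_Key cached at index %d\n",
		       device->name, pkey_index);
}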
gpl-2.0
Lime1iME/Testing
drivers/staging/sbe-2t3e3/module.c
8201
5388
/* * SBE 2T3E3 synchronous serial card driver for Linux * * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This code is based on a driver written by SBE Inc. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/interrupt.h> #include "2t3e3.h" static void check_leds(unsigned long arg) { struct card *card = (struct card *)arg; struct channel *channel0 = &card->channels[0]; static int blinker; update_led(channel0, ++blinker); if (has_two_ports(channel0->pdev)) update_led(&card->channels[1], blinker); card->timer.expires = jiffies + HZ / 10; add_timer(&card->timer); } static void t3e3_remove_channel(struct channel *channel) { struct pci_dev *pdev = channel->pdev; struct net_device *dev = channel->dev; /* system hangs if board asserts irq while module is unloaded */ cpld_stop_intr(channel); free_irq(dev->irq, dev); dc_drop_descriptor_list(channel); unregister_hdlc_device(dev); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static int __devinit t3e3_init_channel(struct channel *channel, struct pci_dev *pdev, struct card *card) { struct net_device *dev; unsigned int val; int err; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, "SBE 2T3E3"); if (err) goto disable; dev = alloc_hdlcdev(channel); if (!dev) { printk(KERN_ERR "SBE 2T3E3" ": Out of memory\n"); err = -ENOMEM; goto free_regions; } t3e3_sc_init(channel); dev_to_priv(dev) = channel; channel->pdev = pdev; channel->dev = dev; channel->card = card; channel->addr = pci_resource_start(pdev, 0); if (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1) channel->h.slot = 1; else channel->h.slot = 0; err = setup_device(dev, channel); if (err) goto free_regions; pci_read_config_dword(channel->pdev, 0x40, &val); /* mask sleep mode */ pci_write_config_dword(channel->pdev, 0x40, val & 0x3FFFFFFF); pci_read_config_byte(channel->pdev, PCI_CACHE_LINE_SIZE, &channel->h.cache_size); pci_read_config_dword(channel->pdev, PCI_COMMAND, &channel->h.command); t3e3_init(channel); err = request_irq(dev->irq, &t3e3_intr, IRQF_SHARED, dev->name, dev); if (err) { printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); goto free_regions; } pci_set_drvdata(pdev, channel); return 0; free_regions: pci_release_regions(pdev); disable: pci_disable_device(pdev); return err; } static void __devexit t3e3_remove_card(struct pci_dev *pdev) { struct channel *channel0 = pci_get_drvdata(pdev); struct card *card = channel0->card; del_timer(&card->timer); if (has_two_ports(channel0->pdev)) { t3e3_remove_channel(&card->channels[1]); pci_dev_put(card->channels[1].pdev); } t3e3_remove_channel(channel0); kfree(card); } static int __devinit t3e3_init_card(struct pci_dev *pdev, const struct pci_device_id *ent) { /* pdev points to channel #0 */ struct pci_dev *pdev1 = NULL; struct card *card; int channels = 1, err; if (has_two_ports(pdev)) { while ((pdev1 = pci_get_subsys(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, PCI_VENDOR_ID_SBE, PCI_SUBDEVICE_ID_SBE_2T3E3_P1, pdev1))) if (pdev1->bus == pdev->bus && pdev1->devfn == pdev->devfn + 8 /* next device on the same bus */) break; /* found the second channel */ if (!pdev1) { printk(KERN_ERR "SBE 2T3E3" ": Can't find the second 
channel\n"); return -EFAULT; } channels = 2; /* holds the reference for pdev1 */ } card = kzalloc(sizeof(struct card) + channels * sizeof(struct channel), GFP_KERNEL); if (!card) { printk(KERN_ERR "SBE 2T3E3" ": Out of memory\n"); return -ENOBUFS; } spin_lock_init(&card->bootrom_lock); card->bootrom_addr = pci_resource_start(pdev, 0); err = t3e3_init_channel(&card->channels[0], pdev, card); if (err) goto free_card; if (channels == 2) { err = t3e3_init_channel(&card->channels[1], pdev1, card); if (err) { t3e3_remove_channel(&card->channels[0]); goto free_card; } } /* start LED timer */ init_timer(&card->timer); card->timer.function = check_leds; card->timer.expires = jiffies + HZ / 10; card->timer.data = (unsigned long)card; add_timer(&card->timer); return 0; free_card: kfree(card); return err; } static struct pci_device_id t3e3_pci_tbl[] __devinitdata = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, PCI_VENDOR_ID_SBE, PCI_SUBDEVICE_ID_SBE_T3E3, 0, 0, 0 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, PCI_VENDOR_ID_SBE, PCI_SUBDEVICE_ID_SBE_2T3E3_P0, 0, 0, 0 }, /* channel 1 will be initialized after channel 0 */ { 0, } }; static struct pci_driver t3e3_pci_driver = { .name = "SBE T3E3", .id_table = t3e3_pci_tbl, .probe = t3e3_init_card, .remove = t3e3_remove_card, }; static int __init t3e3_init_module(void) { return pci_register_driver(&t3e3_pci_driver); } static void __exit t3e3_cleanup_module(void) { pci_unregister_driver(&t3e3_pci_driver); } module_init(t3e3_init_module); module_exit(t3e3_cleanup_module); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, t3e3_pci_tbl);
gpl-2.0
Androguide/android_kernel_samsung_smdk4412
arch/sh/boards/mach-se/7780/setup.c
8457
3051
/*
 * linux/arch/sh/boards/se/7780/setup.c
 *
 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu
 *
 * Hitachi UL SolutionEngine 7780 Support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/machvec.h>
#include <mach-se/mach/se7780.h>
#include <asm/io.h>
#include <asm/heartbeat.h>

/* Heartbeat */
static struct resource heartbeat_resource = {
	.start	= PA_LED,
	.end	= PA_LED,
	.flags	= IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
};

static struct platform_device heartbeat_device = {
	.name		= "heartbeat",
	.id		= -1,
	.num_resources	= 1,
	.resource	= &heartbeat_resource,
};

/* SMC91x */
static struct resource smc91x_eth_resources[] = {
	[0] = {
		.name	= "smc91x-regs",
		.start	= PA_LAN + 0x300,
		.end	= PA_LAN + 0x300 + 0x10,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= SMC_IRQ,
		.end	= SMC_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_eth_device = {
	.name	= "smc91x",
	.id	= 0,
	.dev	= {
		.dma_mask		= NULL,	/* don't use dma */
		.coherent_dma_mask	= 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(smc91x_eth_resources),
	.resource	= smc91x_eth_resources,
};

static struct platform_device *se7780_devices[] __initdata = {
	&heartbeat_device,
	&smc91x_eth_device,
};

static int __init se7780_devices_setup(void)
{
	return platform_add_devices(se7780_devices,
				    ARRAY_SIZE(se7780_devices));
}
device_initcall(se7780_devices_setup);

#define GPIO_PHCR	0xFFEA000E
#define GPIO_PMSELR	0xFFEA0080
#define GPIO_PECR	0xFFEA0008

static void __init se7780_setup(char **cmdline_p)
{
	/* "SH-Linux" on LED Display */
	__raw_writew('S', PA_LED_DISP + (DISP_SEL0_ADDR << 1));
	__raw_writew('H', PA_LED_DISP + (DISP_SEL1_ADDR << 1));
	__raw_writew('-', PA_LED_DISP + (DISP_SEL2_ADDR << 1));
	__raw_writew('L', PA_LED_DISP + (DISP_SEL3_ADDR << 1));
	__raw_writew('i', PA_LED_DISP + (DISP_SEL4_ADDR << 1));
	__raw_writew('n', PA_LED_DISP + (DISP_SEL5_ADDR << 1));
	__raw_writew('u', PA_LED_DISP + (DISP_SEL6_ADDR << 1));
	__raw_writew('x', PA_LED_DISP + (DISP_SEL7_ADDR << 1));

	printk(KERN_INFO "Hitachi UL Solutions Engine 7780SE03 support.\n");

	/*
	 * PCI REQ/GNT setting
	 *   REQ0/GNT0 -> USB
	 *   REQ1/GNT1 -> PC Card
	 *   REQ2/GNT2 -> Serial ATA
	 *   REQ3/GNT3 -> PCI slot
	 */
	__raw_writew(0x0213, FPGA_REQSEL);

	/* GPIO setting */
	__raw_writew(0x0000, GPIO_PECR);
	__raw_writew(__raw_readw(GPIO_PHCR) & 0xfff3, GPIO_PHCR);
	__raw_writew(0x0c00, GPIO_PMSELR);

	/* iVDR Power ON */
	__raw_writew(0x0001, FPGA_IVDRPW);
}

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_se7780 __initmv = {
	.mv_name	= "Solution Engine 7780",
	.mv_setup	= se7780_setup,
	.mv_nr_irqs	= 111,
	.mv_init_irq	= init_se7780_IRQ,
};
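Registering one more memory-mapped device on this board would follow the same declaration pattern as the heartbeat and smc91x entries above; a sketch with an invented resource (the address and device name are placeholders, not real SE7780 hardware):

static struct resource demo_cf_resource = {
	.start	= 0xb8000000,
	.end	= 0xb800000f,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device demo_cf_device = {
	.name		= "demo-cf",
	.id		= -1,
	.num_resources	= 1,
	.resource	= &demo_cf_resource,
};

/* ...&demo_cf_device would then be appended to se7780_devices[] so that
 * se7780_devices_setup() registers it at boot. */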
gpl-2.0
MoKee/android_kernel_samsung_sc03e
drivers/scsi/NCR_D700.c
9225
10648
/* -*- mode: c; c-basic-offset: 8 -*- */ /* NCR Dual 700 MCA SCSI Driver * * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com **----------------------------------------------------------------------------- ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** **----------------------------------------------------------------------------- */ /* Notes: * * Most of the work is done in the chip specific module, 53c700.o * * TODO List: * * 1. Extract the SCSI ID from the voyager CMOS table (necessary to * support multi-host environments. * * */ /* CHANGELOG * * Version 2.2 * * Added mca_set_adapter_name(). * * Version 2.1 * * Modularise the driver into a Board piece (this file) and a chip * piece 53c700.[ch] and 53c700.scr, added module options. You can * now specify the scsi id by the parameters * * NCR_D700=slot:<n> [siop:<n>] id:<n> .... * * They need to be comma separated if compiled into the kernel * * Version 2.0 * * Initial implementation of TCQ (Tag Command Queueing). TCQ is full * featured and uses the clock algorithm to keep track of outstanding * tags and guard against individual tag starvation. Also fixed a bug * in all of the 1.x versions where the D700_data_residue() function * was returning results off by 32 bytes (and thus causing the same 32 * bytes to be written twice corrupting the data block). It turns out * the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones * like the 53c710 (The 710 is the only data manual still available, * which I'd been using to program the 700). * * Version 1.2 * * Much improved message handling engine * * Version 1.1 * * Add code to handle selection reasonably correctly. By the time we * get the selection interrupt, we've already responded, but drop off the * bus and hope the selector will go away. * * Version 1.0: * * Initial release. Fully functional except for procfs and tag * command queueing. Has only been tested on cards with 53c700-66 * chips and only single ended. Features are * * 1. Synchronous data transfers to offset 8 (limit of 700-66) and * 100ns (10MHz) limit of SCSI-2 * * 2. Disconnection and reselection * * Testing: * * I've only really tested this with the 700-66 chip, but have done * soak tests in multi-device environments to verify that * disconnections and reselections are being processed correctly. 
* */ #define NCR_D700_VERSION "2.2" #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mca.h> #include <linux/slab.h> #include <asm/io.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include "53c700.h" #include "NCR_D700.h" static char *NCR_D700; /* command line from insmod */ MODULE_AUTHOR("James Bottomley"); MODULE_DESCRIPTION("NCR Dual700 SCSI Driver"); MODULE_LICENSE("GPL"); module_param(NCR_D700, charp, 0); static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] = { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; #ifdef MODULE #define ARG_SEP ' ' #else #define ARG_SEP ',' #endif static int __init param_setup(char *string) { char *pos = string, *next; int slot = -1, siop = -1; while(pos != NULL && (next = strchr(pos, ':')) != NULL) { int val = (int)simple_strtoul(++next, NULL, 0); if(!strncmp(pos, "slot:", 5)) slot = val; else if(!strncmp(pos, "siop:", 5)) siop = val; else if(!strncmp(pos, "id:", 3)) { if(slot == -1) { printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n"); } else if(slot > MCA_MAX_SLOT_NR) { printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val); } else { if(siop != 0 && siop != 1) { id_array[slot*2] = val; id_array[slot*2 + 1] =val; } else { id_array[slot*2 + siop] = val; } } } if((pos = strchr(pos, ARG_SEP)) != NULL) pos++; } return 1; } /* Host template. The 53c700 routine NCR_700_detect will * fill in all of the missing routines */ static struct scsi_host_template NCR_D700_driver_template = { .module = THIS_MODULE, .name = "NCR Dual 700 MCA", .proc_name = "NCR_D700", .this_id = 7, }; /* We needs this helper because we have two hosts per struct device */ struct NCR_D700_private { struct device *dev; struct Scsi_Host *hosts[2]; char name[30]; char pad; }; static int __devinit NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, int slot, u32 region, int differential) { struct NCR_700_Host_Parameters *hostdata; struct Scsi_Host *host; int ret; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); if (!hostdata) { printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host" "data, detatching\n", siop); return -ENOMEM; } if (!request_region(region, 64, "NCR_D700")) { printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n", region); ret = -ENODEV; goto region_failed; } /* Fill in the three required pieces of hostdata */ hostdata->base = ioport_map(region, 64); hostdata->differential = (((1<<siop) & differential) != 0); hostdata->clock = NCR_D700_CLOCK_MHZ; hostdata->burst_length = 8; /* and register the siop */ host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev); if (!host) { ret = -ENOMEM; goto detect_failed; } p->hosts[siop] = host; /* FIXME: read this from SUS */ host->this_id = id_array[slot * 2 + siop]; host->irq = irq; host->base = region; scsi_scan_host(host); return 0; detect_failed: release_region(region, 64); region_failed: kfree(hostdata); return ret; } static irqreturn_t NCR_D700_intr(int irq, void *data) { struct NCR_D700_private *p = (struct NCR_D700_private *)data; int i, found = 0; for (i = 0; i < 2; i++) if (p->hosts[i] && NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED) found++; return found ? IRQ_HANDLED : IRQ_NONE; } /* Detect a D700 card. 
Note, because of the setup --- the chips are * essentially connectecd to the MCA bus independently, it is easier * to set them up as two separate host adapters, rather than one * adapter with two channels */ static int __devinit NCR_D700_probe(struct device *dev) { struct NCR_D700_private *p; int differential; static int banner = 1; struct mca_device *mca_dev = to_mca_device(dev); int slot = mca_dev->slot; int found = 0; int irq, i; int pos3j, pos3k, pos3a, pos3b, pos4; __u32 base_addr, offset_addr; /* enable board interrupt */ pos4 = mca_device_read_pos(mca_dev, 4); pos4 |= 0x4; mca_device_write_pos(mca_dev, 4, pos4); mca_device_write_pos(mca_dev, 6, 9); pos3j = mca_device_read_pos(mca_dev, 3); mca_device_write_pos(mca_dev, 6, 10); pos3k = mca_device_read_pos(mca_dev, 3); mca_device_write_pos(mca_dev, 6, 0); pos3a = mca_device_read_pos(mca_dev, 3); mca_device_write_pos(mca_dev, 6, 1); pos3b = mca_device_read_pos(mca_dev, 3); base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0; offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70; irq = (pos4 & 0x3) + 11; if(irq >= 13) irq++; if(banner) { printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n" "NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n" "NCR D700:\n"); banner = 0; } /* now do the bus related transforms */ irq = mca_device_transform_irq(mca_dev, irq); base_addr = mca_device_transform_ioport(mca_dev, base_addr); offset_addr = mca_device_transform_ioport(mca_dev, offset_addr); printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr); /*outb(BOARD_RESET, base_addr);*/ /* clear any pending interrupts */ (void)inb(base_addr + 0x08); /* get modctl, used later for setting diff bits */ switch(differential = (inb(base_addr + 0x08) >> 6)) { case 0x00: /* only SIOP1 differential */ differential = 0x02; break; case 0x01: /* Both SIOPs differential */ differential = 0x03; break; case 0x03: /* No SIOPs differential */ differential = 0x00; break; default: printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n", differential); differential = 0x00; break; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; p->dev = dev; snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev)); if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) { printk(KERN_ERR "D700: request_irq failed\n"); kfree(p); return -EBUSY; } /* plumb in both 700 chips */ for (i = 0; i < 2; i++) { int err; if ((err = NCR_D700_probe_one(p, i, irq, slot, offset_addr + (0x80 * i), differential)) != 0) printk("D700: SIOP%d: probe failed, error = %d\n", i, err); else found++; } if (!found) { kfree(p); return -ENODEV; } mca_device_set_claim(mca_dev, 1); mca_device_set_name(mca_dev, "NCR_D700"); dev_set_drvdata(dev, p); return 0; } static void __devexit NCR_D700_remove_one(struct Scsi_Host *host) { scsi_remove_host(host); NCR_700_release(host); kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]); free_irq(host->irq, host); release_region(host->base, 64); } static int __devexit NCR_D700_remove(struct device *dev) { struct NCR_D700_private *p = dev_get_drvdata(dev); int i; for (i = 0; i < 2; i++) NCR_D700_remove_one(p->hosts[i]); kfree(p); return 0; } static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 }; static struct mca_driver NCR_D700_driver = { .id_table = NCR_D700_id_table, .driver = { .name = "NCR_D700", .bus = &mca_bus_type, .probe = NCR_D700_probe, .remove = __devexit_p(NCR_D700_remove), }, }; static int __init NCR_D700_init(void) { #ifdef MODULE if (NCR_D700) 
param_setup(NCR_D700); #endif return mca_register_driver(&NCR_D700_driver); } static void __exit NCR_D700_exit(void) { mca_unregister_driver(&NCR_D700_driver); } module_init(NCR_D700_init); module_exit(NCR_D700_exit); __setup("NCR_D700=", param_setup);
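A worked illustration of the option format documented in the changelog above. For a made-up option string "slot:3 id:6 slot:4 siop:1 id:5" (space-separated as a module parameter, comma-separated when built in), param_setup() fills id_array[] with the equivalent of:

static void demo_param_effect(void)
{
	/* "slot:3 id:6" - no siop given, so both chips in slot 3 get id 6 */
	id_array[3 * 2]     = 6;
	id_array[3 * 2 + 1] = 6;

	/* "slot:4 siop:1 id:5" - only the second chip in slot 4 gets id 5 */
	id_array[4 * 2 + 1] = 5;
}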
gpl-2.0
c2h2/aria-imx6-kernel
arch/arm/mach-pxa/pm.c
9481
2651
/*
 * PXA250/210 Power Management Routines
 *
 * Original code for the SA11x0:
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * Modified for the PXA250 by Nicolas Pitre:
 * Copyright (c) 2002 Monta Vista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <mach/pm.h>

struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
static unsigned long *sleep_save;

int pxa_pm_enter(suspend_state_t state)
{
	unsigned long sleep_save_checksum = 0, checksum = 0;
	int i;

#ifdef CONFIG_IWMMXT
	/* force any iWMMXt context to ram */
	if (elf_hwcap & HWCAP_IWMMXT)
		iwmmxt_task_disable(NULL);
#endif

	/* skip registers saving for standby */
	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
		pxa_cpu_pm_fns->save(sleep_save);
		/* before sleeping, calculate and save a checksum */
		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
			sleep_save_checksum += sleep_save[i];
	}

	/* *** go zzz *** */
	pxa_cpu_pm_fns->enter(state);

	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
		/* after sleeping, validate the checksum */
		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
			checksum += sleep_save[i];

		/* if invalid, display message and wait for a hardware reset */
		if (checksum != sleep_save_checksum) {
			lubbock_set_hexled(0xbadbadc5);
			while (1)
				pxa_cpu_pm_fns->enter(state);
		}

		pxa_cpu_pm_fns->restore(sleep_save);
	}

	pr_debug("*** made it back from resume\n");

	return 0;
}

EXPORT_SYMBOL_GPL(pxa_pm_enter);

static int pxa_pm_valid(suspend_state_t state)
{
	if (pxa_cpu_pm_fns)
		return pxa_cpu_pm_fns->valid(state);

	return -EINVAL;
}

int pxa_pm_prepare(void)
{
	int ret = 0;

	if (pxa_cpu_pm_fns && pxa_cpu_pm_fns->prepare)
		ret = pxa_cpu_pm_fns->prepare();

	return ret;
}

void pxa_pm_finish(void)
{
	if (pxa_cpu_pm_fns && pxa_cpu_pm_fns->finish)
		pxa_cpu_pm_fns->finish();
}

static const struct platform_suspend_ops pxa_pm_ops = {
	.valid		= pxa_pm_valid,
	.enter		= pxa_pm_enter,
	.prepare	= pxa_pm_prepare,
	.finish		= pxa_pm_finish,
};

static int __init pxa_pm_init(void)
{
	if (!pxa_cpu_pm_fns) {
		printk(KERN_ERR "no valid pxa_cpu_pm_fns defined\n");
		return -EINVAL;
	}

	sleep_save = kmalloc(pxa_cpu_pm_fns->save_count * sizeof(unsigned long),
			     GFP_KERNEL);
	if (!sleep_save) {
		printk(KERN_ERR "failed to alloc memory for pm save\n");
		return -ENOMEM;
	}

	suspend_set_ops(&pxa_pm_ops);
	return 0;
}

device_initcall(pxa_pm_init);
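A sketch of how a SoC file would hook into this framework; the handler bodies, names and the register count below are placeholders, with the struct members taken from mach/pm.h.

static void demo_cpu_save(unsigned long *buf)
{
	/* stash SoC-specific registers into sleep_save[] */
}

static void demo_cpu_restore(unsigned long *buf)
{
	/* put them back after wakeup */
}

static void demo_cpu_enter(suspend_state_t state)
{
	/* drop into the SoC sleep state */
}

static struct pxa_cpu_pm_fns demo_pm_fns = {
	.save_count	= 8,	/* slots needed in sleep_save[] */
	.valid		= suspend_valid_only_mem,
	.save		= demo_cpu_save,
	.restore	= demo_cpu_restore,
	.enter		= demo_cpu_enter,
};

static int __init demo_pm_hook(void)
{
	pxa_cpu_pm_fns = &demo_pm_fns;
	return 0;
}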
gpl-2.0
Fuzion24/m7_vzw_kernel
drivers/macintosh/via-maciisi.c
9737
15772
/* * Device driver for the IIsi-style ADB on some Mac LC and II-class machines * * Based on via-cuda.c and via-macii.c, as well as the original * adb-bus.c, which in turn is somewhat influenced by (but uses no * code from) the NetBSD HWDIRECT ADB code. Original IIsi driver work * was done by Robert Thompson and integrated into the old style * driver by Michael Schmitz. * * Original sources (c) Alan Cox, Paul Mackerras, and others. * * Rewritten for Unified ADB by David Huggins-Daines <dhd@debian.org> * * 7/13/2000- extensive changes by Andrew McPherson <andrew@macduff.dhs.org> * Works about 30% of the time now. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> static volatile unsigned char *via; /* VIA registers - spaced 0x200 bytes apart - only the ones we actually use */ #define RS 0x200 /* skip between registers */ #define B 0 /* B-side data */ #define A RS /* A-side data */ #define DIRB (2*RS) /* B-side direction (1=output) */ #define DIRA (3*RS) /* A-side direction (1=output) */ #define SR (10*RS) /* Shift register */ #define ACR (11*RS) /* Auxiliary control register */ #define IFR (13*RS) /* Interrupt flag register */ #define IER (14*RS) /* Interrupt enable register */ /* Bits in B data register: all active low */ #define TREQ 0x08 /* Transfer request (input) */ #define TACK 0x10 /* Transfer acknowledge (output) */ #define TIP 0x20 /* Transfer in progress (output) */ #define ST_MASK 0x30 /* mask for selecting ADB state bits */ /* Bits in ACR */ #define SR_CTRL 0x1c /* Shift register control bits */ #define SR_EXT 0x0c /* Shift on external clock */ #define SR_OUT 0x10 /* Shift out if 1 */ /* Bits in IFR and IER */ #define IER_SET 0x80 /* set bits in IER */ #define IER_CLR 0 /* clear bits in IER */ #define SR_INT 0x04 /* Shift register full/empty */ #define SR_DATA 0x08 /* Shift register data */ #define SR_CLOCK 0x10 /* Shift register clock */ #define ADB_DELAY 150 #undef DEBUG_MACIISI_ADB static struct adb_request* current_req; static struct adb_request* last_req; static unsigned char maciisi_rbuf[16]; static unsigned char *reply_ptr; static int data_index; static int reading_reply; static int reply_len; static int tmp; static int need_sync; static enum maciisi_state { idle, sending, reading, } maciisi_state; static int maciisi_probe(void); static int maciisi_init(void); static int maciisi_send_request(struct adb_request* req, int sync); static void maciisi_sync(struct adb_request *req); static int maciisi_write(struct adb_request* req); static irqreturn_t maciisi_interrupt(int irq, void* arg); static void maciisi_input(unsigned char *buf, int nb); static int maciisi_init_via(void); static void maciisi_poll(void); static int maciisi_start(void); struct adb_driver via_maciisi_driver = { "Mac IIsi", maciisi_probe, maciisi_init, maciisi_send_request, NULL, /* maciisi_adb_autopoll, */ maciisi_poll, NULL /* maciisi_reset_adb_bus */ }; static int maciisi_probe(void) { if (macintosh_config->adb_type != MAC_ADB_IISI) return -ENODEV; via = via1; return 0; } static int maciisi_init(void) { int err; if (via == NULL) return -ENODEV; if ((err = maciisi_init_via())) { printk(KERN_ERR "maciisi_init: maciisi_init_via() failed, code %d\n", err); via = NULL; return err; } if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB", maciisi_interrupt)) { printk(KERN_ERR "maciisi_init: can't get irq 
%d\n", IRQ_MAC_ADB); return -EAGAIN; } printk("adb: Mac IIsi driver v0.2 for Unified ADB.\n"); return 0; } /* Flush data from the ADB controller */ static void maciisi_stfu(void) { int status = via[B] & (TIP|TREQ); if (status & TREQ) { #ifdef DEBUG_MACIISI_ADB printk (KERN_DEBUG "maciisi_stfu called with TREQ high!\n"); #endif return; } udelay(ADB_DELAY); via[ACR] &= ~SR_OUT; via[IER] = IER_CLR | SR_INT; udelay(ADB_DELAY); status = via[B] & (TIP|TREQ); if (!(status & TREQ)) { via[B] |= TIP; while(1) { int poll_timeout = ADB_DELAY * 5; /* Poll for SR interrupt */ while (!(via[IFR] & SR_INT) && poll_timeout-- > 0) status = via[B] & (TIP|TREQ); tmp = via[SR]; /* Clear shift register */ #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "maciisi_stfu: status %x timeout %d data %x\n", status, poll_timeout, tmp); #endif if(via[B] & TREQ) break; /* ACK on-off */ via[B] |= TACK; udelay(ADB_DELAY); via[B] &= ~TACK; } /* end frame */ via[B] &= ~TIP; udelay(ADB_DELAY); } via[IER] = IER_SET | SR_INT; } /* All specifically VIA-related initialization goes here */ static int maciisi_init_via(void) { int i; /* Set the lines up. We want TREQ as input TACK|TIP as output */ via[DIRB] = (via[DIRB] | TACK | TIP) & ~TREQ; /* Shift register on input */ via[ACR] = (via[ACR] & ~SR_CTRL) | SR_EXT; #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "maciisi_init_via: initial status %x\n", via[B] & (TIP|TREQ)); #endif /* Wipe any pending data and int */ tmp = via[SR]; /* Enable keyboard interrupts */ via[IER] = IER_SET | SR_INT; /* Set initial state: idle */ via[B] &= ~(TACK|TIP); /* Clear interrupt bit */ via[IFR] = SR_INT; for(i = 0; i < 60; i++) { udelay(ADB_DELAY); maciisi_stfu(); udelay(ADB_DELAY); if(via[B] & TREQ) break; } if (i == 60) printk(KERN_ERR "maciisi_init_via: bus jam?\n"); maciisi_state = idle; need_sync = 0; return 0; } /* Send a request, possibly waiting for a reply */ static int maciisi_send_request(struct adb_request* req, int sync) { int i; #ifdef DEBUG_MACIISI_ADB static int dump_packet = 0; #endif if (via == NULL) { req->complete = 1; return -ENXIO; } #ifdef DEBUG_MACIISI_ADB if (dump_packet) { printk(KERN_DEBUG "maciisi_send_request:"); for (i = 0; i < req->nbytes; i++) { printk(" %.2x", req->data[i]); } printk(" sync %d\n", sync); } #endif req->reply_expected = 1; i = maciisi_write(req); if (i) { /* Normally, if a packet requires syncing, that happens at the end of * maciisi_send_request. But if the transfer fails, it will be restarted * by maciisi_interrupt(). We use need_sync to tell maciisi_interrupt * when to sync a packet that it sends out. * * Suggestions on a better way to do this are welcome. */ if(i == -EBUSY && sync) need_sync = 1; else need_sync = 0; return i; } if(sync) maciisi_sync(req); return 0; } /* Poll the ADB chip until the request completes */ static void maciisi_sync(struct adb_request *req) { int count = 0; #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "maciisi_sync called\n"); #endif /* If for some reason the ADB chip shuts up on us, we want to avoid an endless loop. */ while (!req->complete && count++ < 50) { maciisi_poll(); } /* This could be BAD... when the ADB controller doesn't respond * for this long, it's probably not coming back :-( */ if (count > 50) /* Hopefully shouldn't happen */ printk(KERN_ERR "maciisi_send_request: poll timed out!\n"); } int maciisi_request(struct adb_request *req, void (*done)(struct adb_request *), int nbytes, ...) 
{ va_list list; int i; req->nbytes = nbytes; req->done = done; req->reply_expected = 0; va_start(list, nbytes); for (i = 0; i < nbytes; i++) req->data[i] = va_arg(list, int); va_end(list); return maciisi_send_request(req, 1); } /* Enqueue a request, and run the queue if possible */ static int maciisi_write(struct adb_request* req) { unsigned long flags; int i; /* We will accept CUDA packets - the VIA sends them to us, so it figures that we should be able to send them to it */ if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) { printk(KERN_ERR "maciisi_write: packet too small or not an ADB or CUDA packet\n"); req->complete = 1; return -EINVAL; } req->next = NULL; req->sent = 0; req->complete = 0; req->reply_len = 0; local_irq_save(flags); if (current_req) { last_req->next = req; last_req = req; } else { current_req = req; last_req = req; } if (maciisi_state == idle) { i = maciisi_start(); if(i != 0) { local_irq_restore(flags); return i; } } else { #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "maciisi_write: would start, but state is %d\n", maciisi_state); #endif local_irq_restore(flags); return -EBUSY; } local_irq_restore(flags); return 0; } static int maciisi_start(void) { struct adb_request* req; int status; #ifdef DEBUG_MACIISI_ADB status = via[B] & (TIP | TREQ); printk(KERN_DEBUG "maciisi_start called, state=%d, status=%x, ifr=%x\n", maciisi_state, status, via[IFR]); #endif if (maciisi_state != idle) { /* shouldn't happen */ printk(KERN_ERR "maciisi_start: maciisi_start called when driver busy!\n"); return -EBUSY; } req = current_req; if (req == NULL) return -EINVAL; status = via[B] & (TIP|TREQ); if (!(status & TREQ)) { #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "maciisi_start: bus busy - aborting\n"); #endif return -EBUSY; } /* Okay, send */ #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "maciisi_start: sending\n"); #endif /* Set state to active */ via[B] |= TIP; /* ACK off */ via[B] &= ~TACK; /* Delay */ udelay(ADB_DELAY); /* Shift out and send */ via[ACR] |= SR_OUT; via[SR] = req->data[0]; data_index = 1; /* ACK on */ via[B] |= TACK; maciisi_state = sending; return 0; } void maciisi_poll(void) { unsigned long flags; local_irq_save(flags); if (via[IFR] & SR_INT) { maciisi_interrupt(0, NULL); } else /* avoid calling this function too quickly in a loop */ udelay(ADB_DELAY); local_irq_restore(flags); } /* Shift register interrupt - this is *supposed* to mean that the register is either full or empty. 
In practice, I have no idea what it means :( */ static irqreturn_t maciisi_interrupt(int irq, void* arg) { int status; struct adb_request *req; #ifdef DEBUG_MACIISI_ADB static int dump_reply = 0; #endif int i; unsigned long flags; local_irq_save(flags); status = via[B] & (TIP|TREQ); #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "state %d status %x ifr %x\n", maciisi_state, status, via[IFR]); #endif if (!(via[IFR] & SR_INT)) { /* Shouldn't happen, we hope */ printk(KERN_ERR "maciisi_interrupt: called without interrupt flag set\n"); local_irq_restore(flags); return IRQ_NONE; } /* Clear the interrupt */ /* via[IFR] = SR_INT; */ switch_start: switch (maciisi_state) { case idle: if (status & TIP) printk(KERN_ERR "maciisi_interrupt: state is idle but TIP asserted!\n"); if(!reading_reply) udelay(ADB_DELAY); /* Shift in */ via[ACR] &= ~SR_OUT; /* Signal start of frame */ via[B] |= TIP; /* Clear the interrupt (throw this value on the floor, it's useless) */ tmp = via[SR]; /* ACK adb chip, high-low */ via[B] |= TACK; udelay(ADB_DELAY); via[B] &= ~TACK; reply_len = 0; maciisi_state = reading; if (reading_reply) { reply_ptr = current_req->reply; } else { reply_ptr = maciisi_rbuf; } break; case sending: /* via[SR]; */ /* Set ACK off */ via[B] &= ~TACK; req = current_req; if (!(status & TREQ)) { /* collision */ printk(KERN_ERR "maciisi_interrupt: send collision\n"); /* Set idle and input */ via[ACR] &= ~SR_OUT; tmp = via[SR]; via[B] &= ~TIP; /* Must re-send */ reading_reply = 0; reply_len = 0; maciisi_state = idle; udelay(ADB_DELAY); /* process this now, because the IFR has been cleared */ goto switch_start; } udelay(ADB_DELAY); if (data_index >= req->nbytes) { /* Sent the whole packet, put the bus back in idle state */ /* Shift in, we are about to read a reply (hopefully) */ via[ACR] &= ~SR_OUT; tmp = via[SR]; /* End of frame */ via[B] &= ~TIP; req->sent = 1; maciisi_state = idle; if (req->reply_expected) { /* Note: only set this once we've successfully sent the packet */ reading_reply = 1; } else { current_req = req->next; if (req->done) (*req->done)(req); /* Do any queued requests now */ i = maciisi_start(); if(i == 0 && need_sync) { /* Packet needs to be synced */ maciisi_sync(current_req); } if(i != -EBUSY) need_sync = 0; } } else { /* Sending more stuff */ /* Shift out */ via[ACR] |= SR_OUT; /* Write */ via[SR] = req->data[data_index++]; /* Signal 'byte ready' */ via[B] |= TACK; } break; case reading: /* Shift in */ /* via[ACR] &= ~SR_OUT; */ /* Not in 2.2 */ if (reply_len++ > 16) { printk(KERN_ERR "maciisi_interrupt: reply too long, aborting read\n"); via[B] |= TACK; udelay(ADB_DELAY); via[B] &= ~(TACK|TIP); maciisi_state = idle; i = maciisi_start(); if(i == 0 && need_sync) { /* Packet needs to be synced */ maciisi_sync(current_req); } if(i != -EBUSY) need_sync = 0; break; } /* Read data */ *reply_ptr++ = via[SR]; status = via[B] & (TIP|TREQ); /* ACK on/off */ via[B] |= TACK; udelay(ADB_DELAY); via[B] &= ~TACK; if (!(status & TREQ)) break; /* more stuff to deal with */ /* end of frame */ via[B] &= ~TIP; tmp = via[SR]; /* That's what happens in 2.2 */ udelay(ADB_DELAY); /* Give controller time to recover */ /* end of packet, deal with it */ if (reading_reply) { req = current_req; req->reply_len = reply_ptr - req->reply; if (req->data[0] == ADB_PACKET) { /* Have to adjust the reply from ADB commands */ if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) { /* the 0x2 bit indicates no response */ req->reply_len = 0; } else { /* leave just the command and result bytes in the reply */ req->reply_len -= 
2; memmove(req->reply, req->reply + 2, req->reply_len); } } #ifdef DEBUG_MACIISI_ADB if (dump_reply) { int i; printk(KERN_DEBUG "maciisi_interrupt: reply is "); for (i = 0; i < req->reply_len; ++i) printk(" %.2x", req->reply[i]); printk("\n"); } #endif req->complete = 1; current_req = req->next; if (req->done) (*req->done)(req); /* Obviously, we got it */ reading_reply = 0; } else { maciisi_input(maciisi_rbuf, reply_ptr - maciisi_rbuf); } maciisi_state = idle; status = via[B] & (TIP|TREQ); if (!(status & TREQ)) { /* Timeout?! More likely, another packet coming in already */ #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "extra data after packet: status %x ifr %x\n", status, via[IFR]); #endif #if 0 udelay(ADB_DELAY); via[B] |= TIP; maciisi_state = reading; reading_reply = 0; reply_ptr = maciisi_rbuf; #else /* Process the packet now */ reading_reply = 0; goto switch_start; #endif /* We used to do this... but the controller might actually have data for us */ /* maciisi_stfu(); */ } else { /* Do any queued requests now if possible */ i = maciisi_start(); if(i == 0 && need_sync) { /* Packet needs to be synced */ maciisi_sync(current_req); } if(i != -EBUSY) need_sync = 0; } break; default: printk("maciisi_interrupt: unknown maciisi_state %d?\n", maciisi_state); } local_irq_restore(flags); return IRQ_HANDLED; } static void maciisi_input(unsigned char *buf, int nb) { #ifdef DEBUG_MACIISI_ADB int i; #endif switch (buf[0]) { case ADB_PACKET: adb_input(buf+2, nb-2, buf[1] & 0x40); break; default: #ifdef DEBUG_MACIISI_ADB printk(KERN_DEBUG "data from IIsi ADB (%d bytes):", nb); for (i = 0; i < nb; ++i) printk(" %.2x", buf[i]); printk("\n"); #endif break; } }
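A hypothetical caller sketch, not in the source: polling Talk register 0 of the ADB device at address 2 through the variadic maciisi_request() above. The ADB_PACKET/ADB_READREG encoding comes from <linux/adb.h>; since maciisi_request() sends synchronously, the reply is already in demo_req.reply when it returns.

#include <linux/adb.h>

static struct adb_request demo_req;

static void demo_poll_address_2(void)
{
	/* data[0] = packet type, data[1] = (address << 4) | Talk register 0 */
	if (maciisi_request(&demo_req, NULL, 2,
			    ADB_PACKET, ADB_READREG(2, 0)) == 0 &&
	    demo_req.reply_len)
		printk(KERN_DEBUG "adb reply, %d bytes\n", demo_req.reply_len);
}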
gpl-2.0
adis1313/android_kernel_samsung_msm8974
arch/arm/mach-gemini/devices.c
10
2861
/*
 * Common devices definition for Gemini
 *
 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/mtd/physmap.h>

#include <mach/irqs.h>
#include <mach/hardware.h>
#include <mach/global_reg.h>

static struct plat_serial8250_port serial_platform_data[] = {
	{
		.membase	= (void *)IO_ADDRESS(GEMINI_UART_BASE),
		.mapbase	= GEMINI_UART_BASE,
		.irq		= IRQ_UART,
		.uartclk	= UART_CLK,
		.regshift	= 2,
		.iotype		= UPIO_MEM,
		.type		= PORT_16550A,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_FIXED_TYPE,
	},
	{},
};

static struct platform_device serial_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = serial_platform_data,
	},
};

int platform_register_uart(void)
{
	return platform_device_register(&serial_device);
}

static struct resource flash_resource = {
	.start	= GEMINI_FLASH_BASE,
	.flags	= IORESOURCE_MEM,
};

static struct physmap_flash_data pflash_platform_data = {};

static struct platform_device pflash_device = {
	.name	= "physmap-flash",
	.id	= 0,
	.dev	= {
		.platform_data = &pflash_platform_data,
	},
	.resource	= &flash_resource,
	.num_resources	= 1,
};

int platform_register_pflash(unsigned int size, struct mtd_partition *parts,
			     unsigned int nr_parts)
{
	unsigned int reg;

	reg = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_STATUS);

	if ((reg & FLASH_TYPE_MASK) != FLASH_TYPE_PARALLEL)
		return -ENXIO;

	if (reg & FLASH_WIDTH_16BIT)
		pflash_platform_data.width = 2;
	else
		pflash_platform_data.width = 1;

	/* enable parallel flash pins and disable others */
	reg = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_MISC_CTRL);
	reg &= ~PFLASH_PADS_DISABLE;
	reg |= SFLASH_PADS_DISABLE | NAND_PADS_DISABLE;
	__raw_writel(reg, IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_MISC_CTRL);

	flash_resource.end = flash_resource.start + size - 1;

	pflash_platform_data.parts = parts;
	pflash_platform_data.nr_parts = nr_parts;

	return platform_device_register(&pflash_device);
}

static struct resource gemini_rtc_resources[] = {
	[0] = {
		.start	= GEMINI_RTC_BASE,
		.end	= GEMINI_RTC_BASE + 0x24,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_RTC,
		.end	= IRQ_RTC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device gemini_rtc_device = {
	.name		= "rtc-gemini",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(gemini_rtc_resources),
	.resource	= gemini_rtc_resources,
};

int __init platform_register_rtc(void)
{
	return platform_device_register(&gemini_rtc_device);
}
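Illustrative board-side usage of these helpers; the partition layout and sizes are invented. A Gemini machine file would hand platform_register_pflash() its flash size and MTD partition table during init.

#include <linux/mtd/partitions.h>
#include <linux/sizes.h>

static struct mtd_partition demo_parts[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= SZ_256K,
		.mask_flags	= MTD_WRITEABLE,	/* masked off: read-only */
	}, {
		.name		= "firmware",
		.offset		= SZ_256K,
		.size		= MTDPART_SIZ_FULL,
	},
};

static void __init demo_board_init(void)
{
	platform_register_uart();
	platform_register_pflash(SZ_16M, demo_parts, ARRAY_SIZE(demo_parts));
	platform_register_rtc();
}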
gpl-2.0
uwehermann/easybox-904-xdsl-firmware
linux/linux-2.6.32.32/drivers/input/misc/gpio_buttons.c
10
5290
/* * Driver for buttons on GPIO lines not capable of generating interrupts * * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com> * * This file was based on: /drivers/input/misc/cobalt_btns.c * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> * * also was based on: /drivers/input/keyboard/gpio_keys.c * Copyright 2005 Phil Blundell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/input-polldev.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/gpio_buttons.h> #include <asm/gpio.h> #define DRV_NAME "gpio-buttons" #define DRV_VERSION "0.1.2" #define PFX DRV_NAME ": " struct gpio_button_data { int last_state; int count; }; struct gpio_buttons_dev { struct input_polled_dev *poll_dev; struct gpio_buttons_platform_data *pdata; struct gpio_button_data *data; }; static void gpio_buttons_poll(struct input_polled_dev *dev) { struct gpio_buttons_dev *bdev = dev->private; struct gpio_buttons_platform_data *pdata = bdev->pdata; struct input_dev *input = dev->input; int i; for (i = 0; i < bdev->pdata->nbuttons; i++) { struct gpio_button *button = &pdata->buttons[i]; unsigned int type = button->type ?: EV_KEY; int state; if (bdev->data[i].count < button->threshold) { bdev->data[i].count++; continue; } state = gpio_get_value(button->gpio) ? 1 : 0; if (state != bdev->data[i].last_state) { input_event(input, type, button->code, !!(state ^ button->active_low)); input_sync(input); bdev->data[i].count = 0; bdev->data[i].last_state = state; } } } static int __devinit gpio_buttons_probe(struct platform_device *pdev) { struct gpio_buttons_platform_data *pdata = pdev->dev.platform_data; struct gpio_buttons_dev *bdev; struct input_polled_dev *poll_dev; struct input_dev *input; int error, i; if (!pdata) return -ENXIO; bdev = kzalloc(sizeof(struct gpio_buttons_dev) + sizeof(struct gpio_button_data) * pdata->nbuttons, GFP_KERNEL); if (!bdev) { printk(KERN_ERR DRV_NAME "no memory for device\n"); return -ENOMEM; } bdev->data = (struct gpio_button_data *) &bdev[1]; poll_dev = input_allocate_polled_device(); if (!poll_dev) { printk(KERN_ERR DRV_NAME "no memory for polled device\n"); error = -ENOMEM; goto err_free_bdev; } poll_dev->private = bdev; poll_dev->poll = gpio_buttons_poll; poll_dev->poll_interval = pdata->poll_interval; input = poll_dev->input; input->evbit[0] = BIT(EV_KEY); input->name = pdev->name; input->phys = "gpio-buttons/input0"; input->dev.parent = &pdev->dev; input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; for (i = 0; i < pdata->nbuttons; i++) { struct gpio_button *button = &pdata->buttons[i]; unsigned int gpio = button->gpio; unsigned int type = button->type ?: EV_KEY; error = gpio_request(gpio, button->desc ? button->desc : DRV_NAME); if (error) { printk(KERN_ERR PFX "unable to claim gpio %u, " "error %d\n", gpio, error); goto err_free_gpio; } error = gpio_direction_input(gpio); if (error) { printk(KERN_ERR PFX "unable to set direction on " "gpio %u, error %d\n", gpio, error); goto err_free_gpio; } input_set_capability(input, type, button->code); bdev->data[i].last_state = gpio_get_value(button->gpio) ? 
1 : 0; } bdev->poll_dev = poll_dev; bdev->pdata = pdata; platform_set_drvdata(pdev, bdev); error = input_register_polled_device(poll_dev); if (error) { printk(KERN_ERR PFX "unable to register polled device, " "error %d\n", error); goto err_free_gpio; } return 0; err_free_gpio: for (i = i - 1; i >= 0; i--) gpio_free(pdata->buttons[i].gpio); input_free_polled_device(poll_dev); err_free_bdev: kfree(bdev); platform_set_drvdata(pdev, NULL); return error; } static int __devexit gpio_buttons_remove(struct platform_device *pdev) { struct gpio_buttons_dev *bdev = platform_get_drvdata(pdev); struct gpio_buttons_platform_data *pdata = bdev->pdata; int i; input_unregister_polled_device(bdev->poll_dev); for (i = 0; i < pdata->nbuttons; i++) gpio_free(pdata->buttons[i].gpio); input_free_polled_device(bdev->poll_dev); kfree(bdev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver gpio_buttons_driver = { .probe = gpio_buttons_probe, .remove = __devexit_p(gpio_buttons_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; static int __init gpio_buttons_init(void) { printk(KERN_INFO DRV_NAME " driver version " DRV_VERSION "\n"); return platform_driver_register(&gpio_buttons_driver); } static void __exit gpio_buttons_exit(void) { platform_driver_unregister(&gpio_buttons_driver); } module_init(gpio_buttons_init); module_exit(gpio_buttons_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>"); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Polled buttons driver for CPU GPIOs");
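A hypothetical board wiring for this driver, with GPIO numbers, key codes and debounce values invented: the platform data that gpio_buttons_probe() expects looks like this.

#include <linux/input.h>
#include <linux/gpio_buttons.h>

static struct gpio_button demo_buttons[] = {
	{
		.gpio		= 3,
		.code		= KEY_RESTART,
		.desc		= "reset",
		.active_low	= 1,
		.threshold	= 3,	/* debounce: 3 poll intervals */
	}, {
		.gpio		= 7,
		.code		= BTN_0,
		.desc		= "wps",
		.active_low	= 1,
		.threshold	= 3,
	},
};

static struct gpio_buttons_platform_data demo_pdata = {
	.buttons	= demo_buttons,
	.nbuttons	= ARRAY_SIZE(demo_buttons),
	.poll_interval	= 20,	/* ms between gpio_buttons_poll() runs */
};

static struct platform_device demo_buttons_device = {
	.name	= "gpio-buttons",
	.id	= -1,
	.dev	= { .platform_data = &demo_pdata },
};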
gpl-2.0
HossainKhademian/XBMC
xbmc/cores/dvdplayer/DVDCodecs/Video/DVDVideoCodecFFmpeg.cpp
10
25729
/* * Copyright (C) 2005-2013 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "system.h" #if (defined HAVE_CONFIG_H) && (!defined TARGET_WINDOWS) #include "config.h" #endif #include "DVDVideoCodecFFmpeg.h" #include "DVDDemuxers/DVDDemux.h" #include "DVDStreamInfo.h" #include "DVDClock.h" #include "DVDCodecs/DVDCodecs.h" #include "DVDCodecs/DVDCodecUtils.h" #include "DVDVideoPPFFmpeg.h" #if defined(TARGET_POSIX) || defined(TARGET_WINDOWS) #include "utils/CPUInfo.h" #endif #include "settings/AdvancedSettings.h" #include "settings/Settings.h" #include "settings/VideoSettings.h" #include "utils/log.h" #include "boost/shared_ptr.hpp" #include "threads/Atomics.h" #ifndef TARGET_POSIX #define RINT(x) ((x) >= 0 ? ((int)((x) + 0.5)) : ((int)((x) - 0.5))) #else #include <math.h> #define RINT lrint #endif #include "cores/VideoRenderers/RenderManager.h" #include "cores/VideoRenderers/RenderFormats.h" #ifdef HAVE_LIBVDPAU #include "VDPAU.h" #endif #ifdef HAS_DX #include "DXVA.h" #endif #ifdef HAVE_LIBVA #include "VAAPI.h" #endif #ifdef TARGET_DARWIN_OSX #include "VDA.h" #endif #include "utils/StringUtils.h" extern "C" { #include "libavutil/opt.h" #include "libavfilter/avfilter.h" #include "libavfilter/buffersink.h" #include "libavfilter/buffersrc.h" } using namespace boost; enum PixelFormat CDVDVideoCodecFFmpeg::GetFormat( struct AVCodecContext * avctx , const PixelFormat * fmt ) { CDVDVideoCodecFFmpeg* ctx = (CDVDVideoCodecFFmpeg*)avctx->opaque; // if frame threading is enabled hw accel is not allowed if((EDECODEMETHOD) CSettings::Get().GetInt("videoplayer.decodingmethod") != VS_DECODEMETHOD_HARDWARE || !ctx->IsHardwareAllowed()) return avcodec_default_get_format(avctx, fmt); const PixelFormat * cur = fmt; while(*cur != PIX_FMT_NONE) { #ifdef HAVE_LIBVDPAU if(VDPAU::CDecoder::IsVDPAUFormat(*cur) && CSettings::Get().GetBool("videoplayer.usevdpau")) { CLog::Log(LOGNOTICE,"CDVDVideoCodecFFmpeg::GetFormat - Creating VDPAU(%ix%i)", avctx->width, avctx->height); VDPAU::CDecoder* vdp = new VDPAU::CDecoder(); if(vdp->Open(avctx, *cur, ctx->m_uSurfacesCount)) { ctx->SetHardware(vdp); return *cur; } else vdp->Release(); } #endif #ifdef HAS_DX if(DXVA::CDecoder::Supports(*cur) && CSettings::Get().GetBool("videoplayer.usedxva2")) { CLog::Log(LOGNOTICE, "CDVDVideoCodecFFmpeg::GetFormat - Creating DXVA(%ix%i)", avctx->width, avctx->height); DXVA::CDecoder* dec = new DXVA::CDecoder(); if(dec->Open(avctx, *cur, ctx->m_uSurfacesCount)) { ctx->SetHardware(dec); return *cur; } else dec->Release(); } #endif #ifdef HAVE_LIBVA // mpeg4 vaapi decoding is disabled if(*cur == PIX_FMT_VAAPI_VLD && CSettings::Get().GetBool("videoplayer.usevaapi")) { VAAPI::CDecoder* dec = new VAAPI::CDecoder(); if(dec->Open(avctx, *cur, ctx->m_uSurfacesCount)) { ctx->SetHardware(dec); return *cur; } else dec->Release(); } #endif #ifdef TARGET_DARWIN_OSX if (*cur == AV_PIX_FMT_VDA && CSettings::Get().GetBool("videoplayer.usevda") 
&& g_advancedSettings.m_useFfmpegVda) { VDA::CDecoder* dec = new VDA::CDecoder(); if(dec->Open(avctx, *cur, ctx->m_uSurfacesCount)) { ctx->SetHardware(dec); return *cur; } else dec->Release(); } #endif cur++; } // hardware decoder de-selected, restore standard ffmpeg if (ctx->GetHardware()) { ctx->SetHardware(NULL); avctx->get_buffer2 = avcodec_default_get_buffer2; avctx->slice_flags = 0; avctx->hwaccel_context = 0; } return avcodec_default_get_format(avctx, fmt); } CDVDVideoCodecFFmpeg::CDVDVideoCodecFFmpeg() : CDVDVideoCodec() { m_pCodecContext = NULL; m_pFrame = NULL; m_pFilterGraph = NULL; m_pFilterIn = NULL; m_pFilterOut = NULL; m_pFilterFrame = NULL; m_iPictureWidth = 0; m_iPictureHeight = 0; m_uSurfacesCount = 0; m_iScreenWidth = 0; m_iScreenHeight = 0; m_iOrientation = 0; m_bSoftware = false; #if defined(TARGET_ANDROID) || defined(TARGET_DARWIN_IOS) // If we get here on Android or iOS, it's always software m_isSWCodec = true; #else m_isSWCodec = false; #endif m_pHardware = NULL; m_iLastKeyframe = 0; m_dts = DVD_NOPTS_VALUE; m_started = false; m_decoderPts = DVD_NOPTS_VALUE; m_codecControlFlags = 0; m_requestSkipDeint = false; } CDVDVideoCodecFFmpeg::~CDVDVideoCodecFFmpeg() { Dispose(); } bool CDVDVideoCodecFFmpeg::Open(CDVDStreamInfo &hints, CDVDCodecOptions &options) { AVCodec* pCodec; m_bSoftware = hints.software; m_iOrientation = hints.orientation; for(std::vector<ERenderFormat>::iterator it = options.m_formats.begin(); it != options.m_formats.end(); ++it) { m_formats.push_back((PixelFormat)CDVDCodecUtils::PixfmtFromEFormat(*it)); if(*it == RENDER_FMT_YUV420P) m_formats.push_back(PIX_FMT_YUVJ420P); } m_formats.push_back(PIX_FMT_NONE); /* always add none to get a terminated list in ffmpeg world */ pCodec = NULL; m_pCodecContext = NULL; if (hints.codec == AV_CODEC_ID_H264) { switch(hints.profile) { case FF_PROFILE_H264_HIGH_10: case FF_PROFILE_H264_HIGH_10_INTRA: case FF_PROFILE_H264_HIGH_422: case FF_PROFILE_H264_HIGH_422_INTRA: case FF_PROFILE_H264_HIGH_444_PREDICTIVE: case FF_PROFILE_H264_HIGH_444_INTRA: case FF_PROFILE_H264_CAVLC_444: // this is needed to not open the decoders m_bSoftware = true; // this we need to enable multithreading for hi10p via advancedsettings m_isSWCodec = true; break; } } else if (hints.codec == AV_CODEC_ID_HEVC || hints.codec == AV_CODEC_ID_VP9) m_isSWCodec = true; if(pCodec == NULL) pCodec = avcodec_find_decoder(hints.codec); if(pCodec == NULL) { CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to find codec %d", hints.codec); return false; } CLog::Log(LOGNOTICE,"CDVDVideoCodecFFmpeg::Open() Using codec: %s",pCodec->long_name ? pCodec->long_name : pCodec->name); if(m_pCodecContext == NULL) m_pCodecContext = avcodec_alloc_context3(pCodec); m_pCodecContext->opaque = (void*)this; m_pCodecContext->debug_mv = 0; m_pCodecContext->debug = 0; m_pCodecContext->workaround_bugs = FF_BUG_AUTODETECT; m_pCodecContext->get_format = GetFormat; m_pCodecContext->codec_tag = hints.codec_tag; /* Only allow slice threading, since frame threading is more * sensitive to changes in frame sizes, and it causes crashes * during HW accell - so we unset it in this case. 
   *
   */
  if ((EDECODEMETHOD) CSettings::Get().GetInt("videoplayer.decodingmethod") == VS_DECODEMETHOD_SOFTWARE
      || m_isSWCodec)
  {
    CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Keeping default threading %d", m_pCodecContext->thread_type);
  }
  else
    m_pCodecContext->thread_type = FF_THREAD_SLICE;

#if defined(TARGET_DARWIN_IOS)
  // ffmpeg with NEON enabled will crash and burn if this is enabled
  m_pCodecContext->flags &= CODEC_FLAG_EMU_EDGE;
#else
  if (pCodec->id != AV_CODEC_ID_H264 && pCodec->capabilities & CODEC_CAP_DR1
      && pCodec->id != AV_CODEC_ID_VP8
     )
    m_pCodecContext->flags |= CODEC_FLAG_EMU_EDGE;
#endif

  // if we don't do this, then some codecs seem to fail.
  m_pCodecContext->coded_height = hints.height;
  m_pCodecContext->coded_width = hints.width;
  m_pCodecContext->bits_per_coded_sample = hints.bitsperpixel;

  if( hints.extradata && hints.extrasize > 0 )
  {
    m_pCodecContext->extradata_size = hints.extrasize;
    m_pCodecContext->extradata = (uint8_t*)av_mallocz(hints.extrasize + FF_INPUT_BUFFER_PADDING_SIZE);
    memcpy(m_pCodecContext->extradata, hints.extradata, hints.extrasize);
  }

  // advanced setting override for skip loop filter (see avcodec.h for valid options)
  // TODO: allow per video setting?
  if (g_advancedSettings.m_iSkipLoopFilter != 0)
  {
    m_pCodecContext->skip_loop_filter = (AVDiscard)g_advancedSettings.m_iSkipLoopFilter;
  }

  // set any special options
  for(std::vector<CDVDCodecOption>::iterator it = options.m_keys.begin(); it != options.m_keys.end(); ++it)
  {
    if (it->m_name == "surfaces")
      m_uSurfacesCount = std::atoi(it->m_value.c_str());
    else
      av_opt_set(m_pCodecContext, it->m_name.c_str(), it->m_value.c_str(), 0);
  }

  int num_threads = std::min(8 /*MAX_THREADS*/, g_cpuInfo.getCPUCount());
  if( num_threads > 1
      && !hints.software && m_pHardware == NULL // thumbnail extraction fails when run threaded
      && ( pCodec->id == AV_CODEC_ID_H264
        || pCodec->id == AV_CODEC_ID_MPEG4
        || pCodec->id == AV_CODEC_ID_HEVC
        || pCodec->id == AV_CODEC_ID_VP9))
    m_pCodecContext->thread_count = num_threads;

  if (avcodec_open2(m_pCodecContext, pCodec, NULL) < 0)
  {
    CLog::Log(LOGDEBUG,"CDVDVideoCodecFFmpeg::Open() Unable to open codec");
    return false;
  }

  m_pFrame = av_frame_alloc();
  if (!m_pFrame)
    return false;

  m_pFilterFrame = av_frame_alloc();
  if (!m_pFilterFrame)
    return false;

  UpdateName();
  return true;
}

void CDVDVideoCodecFFmpeg::Dispose()
{
  if (m_pFrame)
    av_free(m_pFrame);
  m_pFrame = NULL;

  av_frame_free(&m_pFilterFrame);

  if (m_pCodecContext)
  {
    if (m_pCodecContext->codec)
      avcodec_close(m_pCodecContext);
    if (m_pCodecContext->extradata)
    {
      av_free(m_pCodecContext->extradata);
      m_pCodecContext->extradata = NULL;
      m_pCodecContext->extradata_size = 0;
    }
    av_free(m_pCodecContext);
    m_pCodecContext = NULL;
  }
  SAFE_RELEASE(m_pHardware);
  DisposeHWDecoders();

  FilterClose();
}

void CDVDVideoCodecFFmpeg::SetDropState(bool bDrop)
{
  if( m_pCodecContext )
  {
    if (bDrop && m_pHardware && m_pHardware->CanSkipDeint())
    {
      m_requestSkipDeint = true;
      bDrop = false;
    }
    else
      m_requestSkipDeint = false;

    // I don't know exactly how high this should be set;
    // couldn't find any good docs on it. It seems to vary
    // from codec to codec in what it does.
    // 2 seems to be too high..
it causes video to be ruined on following images if( bDrop ) { m_pCodecContext->skip_frame = AVDISCARD_NONREF; m_pCodecContext->skip_idct = AVDISCARD_NONREF; m_pCodecContext->skip_loop_filter = AVDISCARD_NONREF; } else { m_pCodecContext->skip_frame = AVDISCARD_DEFAULT; m_pCodecContext->skip_idct = AVDISCARD_DEFAULT; m_pCodecContext->skip_loop_filter = AVDISCARD_DEFAULT; } } } unsigned int CDVDVideoCodecFFmpeg::SetFilters(unsigned int flags) { m_filters_next.clear(); if(m_pHardware) return 0; if(flags & FILTER_ROTATE) { switch(m_iOrientation) { case 90: m_filters_next += "transpose=1"; break; case 180: m_filters_next += "vflip,hflip"; break; case 270: m_filters_next += "transpose=2"; break; default: break; } } if(flags & FILTER_DEINTERLACE_YADIF) { if(flags & FILTER_DEINTERLACE_HALFED) m_filters_next = "yadif=0:-1"; else m_filters_next = "yadif=1:-1"; if(flags & FILTER_DEINTERLACE_FLAGGED) m_filters_next += ":1"; flags &= ~FILTER_DEINTERLACE_ANY | FILTER_DEINTERLACE_YADIF; } return flags; } union pts_union { double pts_d; int64_t pts_i; }; static int64_t pts_dtoi(double pts) { pts_union u; u.pts_d = pts; return u.pts_i; } int CDVDVideoCodecFFmpeg::Decode(uint8_t* pData, int iSize, double dts, double pts) { int iGotPicture = 0, len = 0; if (!m_pCodecContext) return VC_ERROR; if(pData) m_iLastKeyframe++; shared_ptr<CSingleLock> lock; if(m_pHardware) { CCriticalSection* section = m_pHardware->Section(); if(section) lock = shared_ptr<CSingleLock>(new CSingleLock(*section)); int result; if(pData) result = m_pHardware->Check(m_pCodecContext); else result = m_pHardware->Decode(m_pCodecContext, NULL); if(result) return result; } if(m_pFilterGraph) { int result = 0; if(pData == NULL) result = FilterProcess(NULL); if(result) return result; } m_dts = dts; m_pCodecContext->reordered_opaque = pts_dtoi(pts); AVPacket avpkt; av_init_packet(&avpkt); avpkt.data = pData; avpkt.size = iSize; #define SET_PKT_TS(ts) \ if(ts != DVD_NOPTS_VALUE)\ avpkt.ts = (ts / DVD_TIME_BASE) * AV_TIME_BASE;\ else\ avpkt.ts = AV_NOPTS_VALUE SET_PKT_TS(pts); SET_PKT_TS(dts); #undef SET_PKT_TS /* We lie, but this flag is only used by pngdec.c. * Setting it correctly would allow CorePNG decoding. 
*/ avpkt.flags = AV_PKT_FLAG_KEY; len = avcodec_decode_video2(m_pCodecContext, m_pFrame, &iGotPicture, &avpkt); if(m_iLastKeyframe < m_pCodecContext->has_b_frames + 2) m_iLastKeyframe = m_pCodecContext->has_b_frames + 2; if (len < 0) { CLog::Log(LOGERROR, "%s - avcodec_decode_video returned failure", __FUNCTION__); return VC_ERROR; } if (!iGotPicture) return VC_BUFFER; if(m_pFrame->key_frame) { m_started = true; m_iLastKeyframe = m_pCodecContext->has_b_frames + 2; } /* put a limit on convergence count to avoid huge mem usage on streams without keyframes */ if(m_iLastKeyframe > 300) m_iLastKeyframe = 300; /* h264 doesn't always have keyframes + won't output before first keyframe anyway */ if(m_pCodecContext->codec_id == AV_CODEC_ID_H264 || m_pCodecContext->codec_id == AV_CODEC_ID_SVQ3) m_started = true; if(m_pHardware == NULL) { bool need_scale = std::find( m_formats.begin() , m_formats.end() , m_pCodecContext->pix_fmt) == m_formats.end(); bool need_reopen = false; if(m_filters != m_filters_next) need_reopen = true; if(m_pFilterIn) { if(m_pFilterIn->outputs[0]->format != m_pCodecContext->pix_fmt || m_pFilterIn->outputs[0]->w != m_pCodecContext->width || m_pFilterIn->outputs[0]->h != m_pCodecContext->height) need_reopen = true; } // try to setup new filters if (need_reopen || (need_scale && m_pFilterGraph == NULL)) { m_filters = m_filters_next; if(FilterOpen(m_filters, need_scale) < 0) FilterClose(); } } int result; if(m_pHardware) result = m_pHardware->Decode(m_pCodecContext, m_pFrame); else if(m_pFilterGraph) result = FilterProcess(m_pFrame); else result = VC_PICTURE | VC_BUFFER; if(result & VC_FLUSHED) Reset(); DisposeHWDecoders(); return result; } void CDVDVideoCodecFFmpeg::Reset() { m_started = false; m_decoderPts = DVD_NOPTS_VALUE; m_iLastKeyframe = m_pCodecContext->has_b_frames; avcodec_flush_buffers(m_pCodecContext); if (m_pHardware) m_pHardware->Reset(); m_filters = ""; FilterClose(); } bool CDVDVideoCodecFFmpeg::GetPictureCommon(DVDVideoPicture* pDvdVideoPicture) { if (!m_pFrame) return false; pDvdVideoPicture->iWidth = m_pFrame->width; pDvdVideoPicture->iHeight = m_pFrame->height; /* crop of 10 pixels if demuxer asked it */ if(m_pCodecContext->coded_width && m_pCodecContext->coded_width < (int)pDvdVideoPicture->iWidth && m_pCodecContext->coded_width > (int)pDvdVideoPicture->iWidth - 10) pDvdVideoPicture->iWidth = m_pCodecContext->coded_width; if(m_pCodecContext->coded_height && m_pCodecContext->coded_height < (int)pDvdVideoPicture->iHeight && m_pCodecContext->coded_height > (int)pDvdVideoPicture->iHeight - 10) pDvdVideoPicture->iHeight = m_pCodecContext->coded_height; double aspect_ratio; /* use variable in the frame */ AVRational pixel_aspect = m_pFrame->sample_aspect_ratio; if (pixel_aspect.num == 0) aspect_ratio = 0; else aspect_ratio = av_q2d(pixel_aspect) * pDvdVideoPicture->iWidth / pDvdVideoPicture->iHeight; if (aspect_ratio <= 0.0) aspect_ratio = (float)pDvdVideoPicture->iWidth / (float)pDvdVideoPicture->iHeight; /* XXX: we suppose the screen has a 1.0 pixel ratio */ // CDVDVideo will compensate it. 
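  // Worked example (illustrative numbers, not taken from the source): a
  // 720x576 PAL frame with sample_aspect_ratio 16:15 gives
  //   aspect_ratio = (16/15) * 720/576 = 4/3,
  // so below iDisplayWidth = RINT(576 * 4/3) & -3 = 768 and the picture is
  // presented as 768x576 ("& -3" merely clears bit 1 of the width).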
pDvdVideoPicture->iDisplayHeight = pDvdVideoPicture->iHeight; pDvdVideoPicture->iDisplayWidth = ((int)RINT(pDvdVideoPicture->iHeight * aspect_ratio)) & -3; if (pDvdVideoPicture->iDisplayWidth > pDvdVideoPicture->iWidth) { pDvdVideoPicture->iDisplayWidth = pDvdVideoPicture->iWidth; pDvdVideoPicture->iDisplayHeight = ((int)RINT(pDvdVideoPicture->iWidth / aspect_ratio)) & -3; } pDvdVideoPicture->pts = DVD_NOPTS_VALUE; AVDictionaryEntry * entry = av_dict_get(av_frame_get_metadata(m_pFrame), "stereo_mode", NULL, 0); if(entry && entry->value) { strncpy(pDvdVideoPicture->stereo_mode, (const char*)entry->value, sizeof(pDvdVideoPicture->stereo_mode)); pDvdVideoPicture->stereo_mode[sizeof(pDvdVideoPicture->stereo_mode)-1] = '\0'; } pDvdVideoPicture->iRepeatPicture = 0.5 * m_pFrame->repeat_pict; pDvdVideoPicture->iFlags = DVP_FLAG_ALLOCATED; pDvdVideoPicture->iFlags |= m_pFrame->interlaced_frame ? DVP_FLAG_INTERLACED : 0; pDvdVideoPicture->iFlags |= m_pFrame->top_field_first ? DVP_FLAG_TOP_FIELD_FIRST: 0; pDvdVideoPicture->chroma_position = m_pCodecContext->chroma_sample_location; pDvdVideoPicture->color_primaries = m_pCodecContext->color_primaries; pDvdVideoPicture->color_transfer = m_pCodecContext->color_trc; pDvdVideoPicture->color_matrix = m_pCodecContext->colorspace; if(m_pCodecContext->color_range == AVCOL_RANGE_JPEG || m_pCodecContext->pix_fmt == PIX_FMT_YUVJ420P) pDvdVideoPicture->color_range = 1; else pDvdVideoPicture->color_range = 0; int qscale_type; pDvdVideoPicture->qp_table = av_frame_get_qp_table(m_pFrame, &pDvdVideoPicture->qstride, &qscale_type); switch (qscale_type) { case FF_QSCALE_TYPE_MPEG1: pDvdVideoPicture->qscale_type = DVP_QSCALE_MPEG1; break; case FF_QSCALE_TYPE_MPEG2: pDvdVideoPicture->qscale_type = DVP_QSCALE_MPEG2; break; case FF_QSCALE_TYPE_H264: pDvdVideoPicture->qscale_type = DVP_QSCALE_H264; break; default: pDvdVideoPicture->qscale_type = DVP_QSCALE_UNKNOWN; } if (pDvdVideoPicture->iRepeatPicture) pDvdVideoPicture->dts = DVD_NOPTS_VALUE; else pDvdVideoPicture->dts = m_dts; m_dts = DVD_NOPTS_VALUE; int64_t bpts = av_frame_get_best_effort_timestamp(m_pFrame); if(bpts != AV_NOPTS_VALUE) { pDvdVideoPicture->pts = (double)bpts * DVD_TIME_BASE / AV_TIME_BASE; if (pDvdVideoPicture->pts == m_decoderPts) { pDvdVideoPicture->iRepeatPicture = -0.5; pDvdVideoPicture->pts = DVD_NOPTS_VALUE; pDvdVideoPicture->dts = DVD_NOPTS_VALUE; } } else pDvdVideoPicture->pts = DVD_NOPTS_VALUE; if (pDvdVideoPicture->pts != DVD_NOPTS_VALUE) m_decoderPts = pDvdVideoPicture->pts; else m_decoderPts = m_dts; if (m_requestSkipDeint) { pDvdVideoPicture->iFlags |= DVD_CODEC_CTRL_SKIPDEINT; m_skippedDeint = 1; } else m_skippedDeint = 0; m_requestSkipDeint = false; pDvdVideoPicture->iFlags |= m_codecControlFlags; if(!m_started) pDvdVideoPicture->iFlags |= DVP_FLAG_DROPPED; return true; } bool CDVDVideoCodecFFmpeg::GetPicture(DVDVideoPicture* pDvdVideoPicture) { if(m_pHardware) return m_pHardware->GetPicture(m_pCodecContext, m_pFrame, pDvdVideoPicture); if(!GetPictureCommon(pDvdVideoPicture)) return false; { for (int i = 0; i < 4; i++) pDvdVideoPicture->data[i] = m_pFrame->data[i]; for (int i = 0; i < 4; i++) pDvdVideoPicture->iLineSize[i] = m_pFrame->linesize[i]; } pDvdVideoPicture->iFlags |= pDvdVideoPicture->data[0] ? 
0 : DVP_FLAG_DROPPED; pDvdVideoPicture->extended_format = 0; PixelFormat pix_fmt; pix_fmt = (PixelFormat)m_pFrame->format; pDvdVideoPicture->format = CDVDCodecUtils::EFormatFromPixfmt(pix_fmt); return true; } int CDVDVideoCodecFFmpeg::FilterOpen(const std::string& filters, bool scale) { int result; if (m_pFilterGraph) FilterClose(); if (filters.empty() && !scale) return 0; if (m_pHardware) { CLog::Log(LOGWARNING, "CDVDVideoCodecFFmpeg::FilterOpen - skipped opening filters on hardware decode"); return 0; } if (!(m_pFilterGraph = avfilter_graph_alloc())) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - unable to alloc filter graph"); return -1; } AVFilter* srcFilter = avfilter_get_by_name("buffer"); AVFilter* outFilter = avfilter_get_by_name("buffersink"); // should be last filter in the graph for now std::string args = StringUtils::Format("%d:%d:%d:%d:%d:%d:%d", m_pCodecContext->width, m_pCodecContext->height, m_pCodecContext->pix_fmt, m_pCodecContext->time_base.num ? m_pCodecContext->time_base.num : 1, m_pCodecContext->time_base.num ? m_pCodecContext->time_base.den : 1, m_pCodecContext->sample_aspect_ratio.num != 0 ? m_pCodecContext->sample_aspect_ratio.num : 1, m_pCodecContext->sample_aspect_ratio.num != 0 ? m_pCodecContext->sample_aspect_ratio.den : 1); if ((result = avfilter_graph_create_filter(&m_pFilterIn, srcFilter, "src", args.c_str(), NULL, m_pFilterGraph)) < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_create_filter: src"); return result; } if ((result = avfilter_graph_create_filter(&m_pFilterOut, outFilter, "out", NULL, NULL, m_pFilterGraph)) < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_create_filter: out"); return result; } if ((result = av_opt_set_int_list(m_pFilterOut, "pix_fmts", &m_formats[0], AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - failed settings pix formats"); return result; } if (!filters.empty()) { AVFilterInOut* outputs = avfilter_inout_alloc(); AVFilterInOut* inputs = avfilter_inout_alloc(); outputs->name = av_strdup("in"); outputs->filter_ctx = m_pFilterIn; outputs->pad_idx = 0; outputs->next = NULL; inputs->name = av_strdup("out"); inputs->filter_ctx = m_pFilterOut; inputs->pad_idx = 0; inputs->next = NULL; if ((result = avfilter_graph_parse_ptr(m_pFilterGraph, (const char*)m_filters.c_str(), &inputs, &outputs, NULL)) < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_parse"); return result; } avfilter_inout_free(&outputs); avfilter_inout_free(&inputs); } else { if ((result = avfilter_link(m_pFilterIn, 0, m_pFilterOut, 0)) < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_link"); return result; } } if ((result = avfilter_graph_config(m_pFilterGraph, NULL)) < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_config"); return result; } return result; } void CDVDVideoCodecFFmpeg::FilterClose() { if (m_pFilterGraph) { avfilter_graph_free(&m_pFilterGraph); // Disposed by above code m_pFilterIn = NULL; m_pFilterOut = NULL; } } int CDVDVideoCodecFFmpeg::FilterProcess(AVFrame* frame) { int result; if (frame) { result = av_buffersrc_add_frame(m_pFilterIn, frame); if (result < 0) { CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterProcess - av_buffersrc_add_frame"); return VC_ERROR; } } result = av_buffersink_get_frame(m_pFilterOut, m_pFilterFrame); if(result == AVERROR(EAGAIN) || result == AVERROR_EOF) return VC_BUFFER; else if(result < 0) { CLog::Log(LOGERROR, 
"CDVDVideoCodecFFmpeg::FilterProcess - av_buffersink_get_frame"); return VC_ERROR; } av_frame_unref(m_pFrame); av_frame_move_ref(m_pFrame, m_pFilterFrame); return VC_PICTURE; } unsigned CDVDVideoCodecFFmpeg::GetConvergeCount() { if(m_pHardware) return m_iLastKeyframe; else return 0; } unsigned CDVDVideoCodecFFmpeg::GetAllowedReferences() { if(m_pHardware) return m_pHardware->GetAllowedReferences(); else return 0; } bool CDVDVideoCodecFFmpeg::GetCodecStats(double &pts, int &droppedPics) { pts = m_decoderPts; if (m_skippedDeint) droppedPics = m_skippedDeint; else droppedPics = -1; return true; } void CDVDVideoCodecFFmpeg::SetCodecControl(int flags) { m_codecControlFlags = flags; } void CDVDVideoCodecFFmpeg::SetHardware(IHardwareDecoder* hardware) { if (m_pHardware) m_disposeDecoders.push_back(m_pHardware); m_pHardware = hardware; UpdateName(); } void CDVDVideoCodecFFmpeg::DisposeHWDecoders() { while (!m_disposeDecoders.empty()) { m_disposeDecoders.back()->Release(); m_disposeDecoders.pop_back(); } }
gpl-2.0
fishbaoz/coreboot
src/cpu/amd/quadcore/amd_sibling.c
10
3141
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/x86/lapic.h>
#include <device/device.h>
#include <device/pci.h>
#include <pc80/mc146818rtc.h>
#include <smp/spinlock.h>
#include <cpu/x86/mtrr.h>
#include <cpu/amd/model_10xxx_msr.h>
#include <cpu/amd/model_10xxx_rev.h>
#include <cpu/amd/amdfam10_sysconf.h>

extern device_t get_node_pci(u32 nodeid, u32 fn);

#if 0
static int first_time = 1;
#endif

#include "quadcore_id.c"

static u32 get_max_siblings(u32 nodes)
{
    device_t dev;
    u32 nodeid;
    u32 siblings = 0;

    // get the max number of siblings from all the nodes
    for (nodeid = 0; nodeid < nodes; nodeid++) {
        int j;
        dev = get_node_pci(nodeid, 3);
        j = (pci_read_config32(dev, 0xe8) >> 12) & 3;
        if (siblings < j) {
            siblings = j;
        }
    }

    return siblings;
}

static void enable_apic_ext_id(u32 nodes)
{
    device_t dev;
    u32 nodeid;

    // enable APIC_EXT_ID on all the nodes
    for (nodeid = 0; nodeid < nodes; nodeid++) {
        u32 val;
        dev = get_node_pci(nodeid, 0);
        val = pci_read_config32(dev, 0x68);
        val |= (1 << 17) | (1 << 18);
        pci_write_config32(dev, 0x68, val);
    }
}

u32 get_apicid_base(u32 ioapic_num)
{
    u32 apicid_base;
    u32 siblings;
    u32 nb_cfg_54;

    u32 disable_siblings = !CONFIG_LOGICAL_CPUS;

    get_option(&disable_siblings, "multi_core");

    siblings = get_max_siblings(sysconf.nodes);

    if (sysconf.bsp_apicid > 0) { // the IO APIC could start from 0
        return 0;
    } else if (sysconf.enabled_apic_ext_id) { // ext id is enabled but the BSP is 0
        return 1;
    }

    nb_cfg_54 = read_nb_cfg_54();

    // construct apicid_base
    if ((!disable_siblings) && (siblings > 0)) {
        /* For 8-way dual core we would use up apicid 16:16; 16 is actually
           not allowed by the current kernel, which will try to get an id
           smaller than 16 to make the IO APIC work. I don't know when the
           kernel will support 256 APIC ids. (APIC_EXT_ID is enabled.) */

        // 4:10 for two-way, 8:12 for four-way, 16:16 for eight-way
        // Use CONFIG_MAX_PHYSICAL_CPUS instead of nodes for better consistency?
        apicid_base = nb_cfg_54 ? (siblings + 1) * sysconf.nodes : 8 * siblings + sysconf.nodes;
    } else {
        apicid_base = sysconf.nodes;
    }

    if ((apicid_base + ioapic_num - 1) > 0xf) {
        // We need to enable APIC EXT ID
        printk(BIOS_SPEW, "if the IO APIC device doesn't support 256 apic ids,\n you need to set CONFIG_ENABLE_APIC_EXT_ID in MB Option.lb so you can spare 16 ids for the ioapic\n");
        enable_apic_ext_id(sysconf.nodes);
    }

    return apicid_base;
}
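/* Worked example for get_apicid_base() (a sketch with assumed values, not
 * from the original source): on a 4-node system where every node reports
 * siblings == 3 (quad-core) and NB_CFG bit 54 is set, the branch above
 * yields
 *     apicid_base = (siblings + 1) * nodes = 4 * 4 = 16;
 * with one IO APIC, apicid_base + ioapic_num - 1 = 16 > 0xf, so
 * enable_apic_ext_id() is called to make room for the IO APIC ids.
 * With NB_CFG bit 54 clear, the same system instead gets
 *     apicid_base = 8 * siblings + nodes = 8 * 3 + 4 = 28.
 */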
gpl-2.0
PhoenixiaGaming/WoP-v4
src/server/scripts/EasternKingdoms/ScarletEnclave/zone_the_scarlet_enclave.cpp
10
4165
/* * Copyright (C) 2008-2014 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "PassiveAI.h" #include "Player.h" /*#### ## npc_valkyr_battle_maiden ####*/ enum Spells_VBM { SPELL_REVIVE = 51918 }; enum Says_VBM { WHISPER_REVIVE = 0 }; class npc_valkyr_battle_maiden : public CreatureScript { public: npc_valkyr_battle_maiden() : CreatureScript("npc_valkyr_battle_maiden") { } CreatureAI* GetAI(Creature* creature) const override { return new npc_valkyr_battle_maidenAI(creature); } struct npc_valkyr_battle_maidenAI : public PassiveAI { npc_valkyr_battle_maidenAI(Creature* creature) : PassiveAI(creature) { Initialize(); } void Initialize() { FlyBackTimer = 500; phase = 0; x = 0.f; y = 0.f; z = 0.f; } uint32 FlyBackTimer; float x, y, z; uint32 phase; void Reset() override { me->setActive(true); me->SetVisible(false); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); me->SetCanFly(true); me->GetPosition(x, y, z); z += 4.0f; x -= 3.5f; y -= 5.0f; me->GetMotionMaster()->Clear(false); me->SetPosition(x, y, z, 0.0f); } void UpdateAI(uint32 diff) override { if (FlyBackTimer <= diff) { Player* player = NULL; if (me->IsSummon()) if (Unit* summoner = me->ToTempSummon()->GetSummoner()) player = summoner->ToPlayer(); if (!player) phase = 3; switch (phase) { case 0: me->SetWalk(false); me->HandleEmoteCommand(EMOTE_STATE_FLYGRABCLOSED); FlyBackTimer = 500; break; case 1: player->GetClosePoint(x, y, z, me->GetObjectSize()); z += 2.5f; x -= 2.0f; y -= 1.5f; me->GetMotionMaster()->MovePoint(0, x, y, z); me->SetTarget(player->GetGUID()); me->SetVisible(true); FlyBackTimer = 4500; break; case 2: if (!player->IsResurrectRequested()) { me->HandleEmoteCommand(EMOTE_ONESHOT_CUSTOM_SPELL_01); DoCast(player, SPELL_REVIVE, true); Talk(WHISPER_REVIVE, player); } FlyBackTimer = 5000; break; case 3: me->SetVisible(false); FlyBackTimer = 3000; break; case 4: me->DisappearAndDie(); break; default: //Nothing To DO break; } ++phase; } else FlyBackTimer-=diff; } }; }; void AddSC_the_scarlet_enclave() { new npc_valkyr_battle_maiden(); }
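/* Phase timeline of npc_valkyr_battle_maiden, summarizing the AI above
 * (the times are the FlyBackTimer values between steps):
 *   phase 0 (after 500ms): stop walking, play the fly-grab emote
 *   phase 1 (+500ms):      fly next to the summoning player, target them,
 *                          become visible
 *   phase 2 (+4.5s):       cast SPELL_REVIVE and whisper, unless a
 *                          resurrect is already pending
 *   phase 3 (+5s):         turn invisible again
 *   phase 4 (+3s):         DisappearAndDie()
 * If the summoning player is gone, the AI jumps straight to phase 3.
 */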
gpl-2.0
Ankso/TrinityCore
src/server/scripts/Northrend/zone_dragonblight.cpp
10
28180
/* * Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Dragonblight SD%Complete: 100 SDComment: SDCategory: Dragonblight EndScriptData */ /* ContentData npc_alexstrasza_wr_gate EndContentData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "ScriptedGossip.h" #include "SpellScript.h" #include "SpellAuraEffects.h" #include "ScriptedEscortAI.h" #include "Vehicle.h" #include "CombatAI.h" #include "Player.h" /*##### # npc_commander_eligor_dawnbringer #####*/ enum CommanderEligorDawnbringer { MODEL_IMAGE_OF_KELTHUZAD = 24787, // Image of Kel'Thuzad MODEL_IMAGE_OF_SAPPHIRON = 24788, // Image of Sapphiron MODEL_IMAGE_OF_RAZUVIOUS = 24799, // Image of Razuvious MODEL_IMAGE_OF_GOTHIK = 24804, // Image of Gothik MODEL_IMAGE_OF_THANE = 24802, // Image of Thane Korth'azz MODEL_IMAGE_OF_BLAUMEUX = 24794, // Image of Lady Blaumeux MODEL_IMAGE_OF_ZELIEK = 24800, // Image of Sir Zeliek MODEL_IMAGE_OF_PATCHWERK = 24798, // Image of Patchwerk MODEL_IMAGE_OF_GROBBULUS = 24792, // Image of Grobbulus MODEL_IMAGE_OF_THADDIUS = 24801, // Image of Thaddius MODEL_IMAGE_OF_GLUTH = 24803, // Image of Gluth MODEL_IMAGE_OF_ANUBREKHAN = 24789, // Image of Anub'rekhan MODEL_IMAGE_OF_FAERLINA = 24790, // Image of Faerlina MODEL_IMAGE_OF_MAEXXNA = 24796, // Image of Maexxna MODEL_IMAGE_OF_NOTH = 24797, // Image of Noth MODEL_IMAGE_OF_HEIGAN = 24793, // Image of Heigan MODEL_IMAGE_OF_LOATHEB = 24795, // Image of Loatheb NPC_IMAGE_OF_KELTHUZAD = 27766, // Image of Kel'Thuzad NPC_IMAGE_OF_SAPPHIRON = 27767, // Image of Sapphiron NPC_IMAGE_OF_RAZUVIOUS = 27768, // Image of Razuvious NPC_IMAGE_OF_GOTHIK = 27769, // Image of Gothik NPC_IMAGE_OF_THANE = 27770, // Image of Thane Korth'azz NPC_IMAGE_OF_BLAUMEUX = 27771, // Image of Lady Blaumeux NPC_IMAGE_OF_ZELIEK = 27772, // Image of Sir Zeliek NPC_IMAGE_OF_PATCHWERK = 27773, // Image of Patchwerk NPC_IMAGE_OF_GROBBULUS = 27774, // Image of Grobbulus NPC_IMAGE_OF_THADDIUS = 27775, // Image of Thaddius NPC_IMAGE_OF_GLUTH = 27782, // Image of Gluth NPC_IMAGE_OF_ANUBREKHAN = 27776, // Image of Anub'rekhan NPC_IMAGE_OF_FAERLINA = 27777, // Image of Faerlina NPC_IMAGE_OF_MAEXXNA = 27778, // Image of Maexxna NPC_IMAGE_OF_NOTH = 27779, // Image of Noth NPC_IMAGE_OF_HEIGAN = 27780, // Image of Heigan NPC_IMAGE_OF_LOATHEB = 27781, // Image of Loatheb NPC_INFANTRYMAN = 27160, // Add in case I randomize the spawning NPC_SENTINAL = 27162, NPC_BATTLE_MAGE = 27164, // Five platforms to choose from SAY_PINNACLE = 0, SAY_DEATH_KNIGHT_WING = 1, SAY_ABOMINATION_WING = 2, SAY_SPIDER_WING = 3, SAY_PLAGUE_WING = 4, // Used in all talks SAY_TALK_COMPLETE = 5, // Pinnacle of Naxxramas SAY_SAPPHIRON = 6, SAY_KELTHUZAD_1 = 7, SAY_KELTHUZAD_2 = 8, SAY_KELTHUZAD_3 = 9, // Death knight wing of Naxxramas SAY_RAZUVIOUS = 10, SAY_GOTHIK = 11, 
SAY_DEATH_KNIGHTS_1 = 12, SAY_DEATH_KNIGHTS_2 = 13, SAY_DEATH_KNIGHTS_3 = 14, SAY_DEATH_KNIGHTS_4 = 15, // Blighted abomination wing of Naxxramas SAY_PATCHWERK = 16, SAY_GROBBULUS = 17, SAY_GLUTH = 18, SAY_THADDIUS = 19, // Accursed spider wing of Naxxramas SAY_ANUBREKHAN = 20, SAY_FAERLINA = 21, SAY_MAEXXNA = 22, // Dread plague wing of Naxxramas SAY_NOTH = 23, SAY_HEIGAN_1 = 24, SAY_HEIGAN_2 = 25, SAY_LOATHEB = 26, SPELL_HEROIC_IMAGE_CHANNEL = 49519, EVENT_START_RANDOM = 1, EVENT_MOVE_TO_POINT = 2, EVENT_TALK_COMPLETE = 3, EVENT_GET_TARGETS = 4, EVENT_KELTHUZAD_2 = 5, EVENT_KELTHUZAD_3 = 6, EVENT_DEATH_KNIGHTS_2 = 7, EVENT_DEATH_KNIGHTS_3 = 8, EVENT_DEATH_KNIGHTS_4 = 9, EVENT_HEIGAN_2 = 10 }; uint32 const AudienceMobs[3] = { NPC_INFANTRYMAN, NPC_SENTINAL, NPC_BATTLE_MAGE }; Position const PosTalkLocations[6] = { { 3805.453f, -682.9075f, 222.2917f, 2.793398f }, // Pinnacle of Naxxramas { 3807.508f, -691.0882f, 221.9688f, 2.094395f }, // Death knight wing of Naxxramas { 3797.228f, -690.3555f, 222.5019f, 1.134464f }, // Blighted abomination wing of Naxxramas { 3804.038f, -672.3098f, 222.5019f, 4.578917f }, // Accursed spider wing of Naxxramas { 3815.097f, -680.2596f, 221.9777f, 2.86234f }, // Dread plague wing of Naxxramas { 3798.05f, -680.611f, 222.9825f, 6.038839f }, // Home }; class npc_commander_eligor_dawnbringer : public CreatureScript { public: npc_commander_eligor_dawnbringer() : CreatureScript("npc_commander_eligor_dawnbringer") {} struct npc_commander_eligor_dawnbringerAI : public ScriptedAI { npc_commander_eligor_dawnbringerAI(Creature* creature) : ScriptedAI(creature) { talkWing = 0; } void Reset() override { talkWing = 0; for (ObjectGuid& guid : audienceList) guid.Clear(); for (ObjectGuid& guid : imageList) guid.Clear(); _events.ScheduleEvent(EVENT_GET_TARGETS, 5000); _events.ScheduleEvent(EVENT_START_RANDOM, 20000); } void MovementInform(uint32 type, uint32 id) override { if (type == POINT_MOTION_TYPE) { if (id == 1) { me->SetFacingTo(PosTalkLocations[talkWing].GetOrientation()); TurnAudience(); switch (talkWing) { case 0: // Pinnacle of Naxxramas { switch (urand (0, 1)) { case 0: ChangeImage(NPC_IMAGE_OF_KELTHUZAD, MODEL_IMAGE_OF_KELTHUZAD, SAY_KELTHUZAD_1); _events.ScheduleEvent(EVENT_KELTHUZAD_2, 8000); break; case 1: ChangeImage(NPC_IMAGE_OF_SAPPHIRON, MODEL_IMAGE_OF_SAPPHIRON, SAY_SAPPHIRON); break; } } break; case 1: // Death knight wing of Naxxramas { switch (urand (0, 2)) { case 0: ChangeImage(NPC_IMAGE_OF_RAZUVIOUS, MODEL_IMAGE_OF_RAZUVIOUS, SAY_RAZUVIOUS); break; case 1: ChangeImage(NPC_IMAGE_OF_GOTHIK, MODEL_IMAGE_OF_GOTHIK, SAY_GOTHIK); break; case 2: ChangeImage(NPC_IMAGE_OF_THANE, MODEL_IMAGE_OF_THANE, SAY_DEATH_KNIGHTS_1); _events.ScheduleEvent(EVENT_DEATH_KNIGHTS_2, 10000); break; } } break; case 2: // Blighted abomination wing of Naxxramas { switch (urand (0, 3)) { case 0: ChangeImage(NPC_IMAGE_OF_PATCHWERK, MODEL_IMAGE_OF_PATCHWERK, SAY_PATCHWERK); break; case 1: ChangeImage(NPC_IMAGE_OF_GROBBULUS, MODEL_IMAGE_OF_GROBBULUS, SAY_GROBBULUS); break; case 2: ChangeImage(NPC_IMAGE_OF_THADDIUS, MODEL_IMAGE_OF_THADDIUS, SAY_THADDIUS); break; case 3: ChangeImage(NPC_IMAGE_OF_GLUTH, MODEL_IMAGE_OF_GLUTH, SAY_GLUTH); break; } } break; case 3: // Accursed spider wing of Naxxramas { switch (urand (0, 2)) { case 0: ChangeImage(NPC_IMAGE_OF_ANUBREKHAN, MODEL_IMAGE_OF_ANUBREKHAN, SAY_ANUBREKHAN); break; case 1: ChangeImage(NPC_IMAGE_OF_FAERLINA, MODEL_IMAGE_OF_FAERLINA, SAY_FAERLINA); break; case 2: ChangeImage(NPC_IMAGE_OF_MAEXXNA, MODEL_IMAGE_OF_MAEXXNA, SAY_MAEXXNA); 
break; } } break; case 4: // Dread plague wing of Naxxramas { switch (urand (0, 2)) { case 0: ChangeImage(NPC_IMAGE_OF_NOTH, MODEL_IMAGE_OF_NOTH, SAY_NOTH); break; case 1: ChangeImage(NPC_IMAGE_OF_HEIGAN, MODEL_IMAGE_OF_HEIGAN, SAY_HEIGAN_1); _events.ScheduleEvent(EVENT_HEIGAN_2, 8000); break; case 2: ChangeImage(NPC_IMAGE_OF_LOATHEB, MODEL_IMAGE_OF_LOATHEB, SAY_LOATHEB); break; } } break; case 5: // Home _events.ScheduleEvent(EVENT_START_RANDOM, 30000); break; } } } } void StoreTargets() { uint8 creaturecount; creaturecount = 0; for (uint8 ii = 0; ii < 3; ++ii) { std::list<Creature*> creatureList; GetCreatureListWithEntryInGrid(creatureList, me, AudienceMobs[ii], 15.0f); for (Creature* creature : creatureList) { audienceList[creaturecount] = creature->GetGUID(); ++creaturecount; } } if (Creature* creature = me->FindNearestCreature(NPC_IMAGE_OF_KELTHUZAD, 20.0f, true)) imageList[0] = creature->GetGUID(); if (Creature* creature = me->FindNearestCreature(NPC_IMAGE_OF_RAZUVIOUS, 20.0f, true)) imageList[1] = creature->GetGUID(); if (Creature* creature = me->FindNearestCreature(NPC_IMAGE_OF_PATCHWERK, 20.0f, true)) imageList[2] = creature->GetGUID(); if (Creature* creature = me->FindNearestCreature(NPC_IMAGE_OF_ANUBREKHAN, 20.0f, true)) imageList[3] = creature->GetGUID(); if (Creature* creature = me->FindNearestCreature(NPC_IMAGE_OF_NOTH, 20.0f, true)) imageList[4] = creature->GetGUID(); } void ChangeImage(uint32 entry, uint32 model, uint8 text) { if (Creature* creature = ObjectAccessor::GetCreature(*me, imageList[talkWing])) { Talk(text); creature->SetEntry(entry); creature->SetDisplayId(model); creature->CastSpell(creature, SPELL_HEROIC_IMAGE_CHANNEL); _events.ScheduleEvent(EVENT_TALK_COMPLETE, 40000); } } void TurnAudience() { for (uint8 i = 0; i < 10; ++i) { if (Creature* creature = ObjectAccessor::GetCreature(*me, audienceList[i])) creature->SetFacingToObject(me); } } void UpdateAI(uint32 diff) override { _events.Update(diff); while (uint32 eventId = _events.ExecuteEvent()) { switch (eventId) { case EVENT_START_RANDOM: talkWing = urand (0, 4); Talk(talkWing); _events.ScheduleEvent(EVENT_MOVE_TO_POINT, 8000); break; case EVENT_MOVE_TO_POINT: me->SetWalk(true); me->GetMotionMaster()->Clear(); me->GetMotionMaster()->MovePoint(1, PosTalkLocations[talkWing].m_positionX, PosTalkLocations[talkWing].m_positionY, PosTalkLocations[talkWing].m_positionZ); break; case EVENT_TALK_COMPLETE: talkWing = 5; Talk(talkWing); _events.ScheduleEvent(EVENT_MOVE_TO_POINT, 5000); break; case EVENT_GET_TARGETS: StoreTargets(); break; case EVENT_KELTHUZAD_2: Talk(SAY_KELTHUZAD_2); _events.ScheduleEvent(EVENT_KELTHUZAD_3, 8000); break; case EVENT_KELTHUZAD_3: Talk(SAY_KELTHUZAD_3); break; case EVENT_DEATH_KNIGHTS_2: Talk(SAY_DEATH_KNIGHTS_2); if (Creature* creature = ObjectAccessor::GetCreature(*me, imageList[talkWing])) { creature->SetEntry(NPC_IMAGE_OF_BLAUMEUX); creature->SetDisplayId(MODEL_IMAGE_OF_BLAUMEUX); } _events.ScheduleEvent(EVENT_DEATH_KNIGHTS_3, 10000); break; case EVENT_DEATH_KNIGHTS_3: Talk(SAY_DEATH_KNIGHTS_3); if (Creature* creature = ObjectAccessor::GetCreature(*me, imageList[talkWing])) { creature->SetEntry(NPC_IMAGE_OF_ZELIEK); creature->SetDisplayId(MODEL_IMAGE_OF_ZELIEK); } _events.ScheduleEvent(EVENT_DEATH_KNIGHTS_4, 10000); break; case EVENT_DEATH_KNIGHTS_4: Talk(SAY_DEATH_KNIGHTS_4); break; case EVENT_HEIGAN_2: Talk(SAY_HEIGAN_2); break; default: break; } } DoMeleeAttackIfReady(); } private: EventMap _events; ObjectGuid audienceList[10]; ObjectGuid imageList[5]; uint8 talkWing; }; 
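        // Flow of the AI above, summarized: EVENT_START_RANDOM picks one of
        // the five wings (talkWing 0-4), announces it and schedules
        // EVENT_MOVE_TO_POINT; on arrival, MovementInform() faces the
        // audience and swaps the channelled image to a random boss of that
        // wing via ChangeImage(), which schedules EVENT_TALK_COMPLETE; that
        // event walks the commander home (talkWing = 5) and re-arms
        // EVENT_START_RANDOM.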
CreatureAI* GetAI(Creature* creature) const override { return new npc_commander_eligor_dawnbringerAI(creature); } }; /*###### ## Quest Strengthen the Ancients (12096|12092) ######*/ enum StrengthenAncientsMisc { SAY_WALKER_FRIENDLY = 0, SAY_WALKER_ENEMY = 1, SAY_LOTHALOR = 0, SPELL_CREATE_ITEM_BARK = 47550, SPELL_CONFUSED = 47044, NPC_LOTHALOR = 26321, FACTION_WALKER_ENEMY = 14, }; class spell_q12096_q12092_dummy : public SpellScriptLoader // Strengthen the Ancients: On Interact Dummy to Woodlands Walker { public: spell_q12096_q12092_dummy() : SpellScriptLoader("spell_q12096_q12092_dummy") { } class spell_q12096_q12092_dummy_SpellScript : public SpellScript { PrepareSpellScript(spell_q12096_q12092_dummy_SpellScript); void HandleDummy(SpellEffIndex /*effIndex*/) { uint32 roll = rand32() % 2; Creature* tree = GetHitCreature(); Player* player = GetCaster()->ToPlayer(); if (!tree || !player) return; tree->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_SPELLCLICK); if (roll == 1) // friendly version { tree->CastSpell(player, SPELL_CREATE_ITEM_BARK); tree->AI()->Talk(SAY_WALKER_FRIENDLY, player); tree->DespawnOrUnsummon(1000); } else if (roll == 0) // enemy version { tree->AI()->Talk(SAY_WALKER_ENEMY, player); tree->setFaction(FACTION_WALKER_ENEMY); tree->Attack(player, true); } } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_q12096_q12092_dummy_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const override { return new spell_q12096_q12092_dummy_SpellScript(); } }; class spell_q12096_q12092_bark : public SpellScriptLoader // Bark of the Walkers { public: spell_q12096_q12092_bark() : SpellScriptLoader("spell_q12096_q12092_bark") { } class spell_q12096_q12092_bark_SpellScript : public SpellScript { PrepareSpellScript(spell_q12096_q12092_bark_SpellScript); void HandleDummy(SpellEffIndex /*effIndex*/) { Creature* lothalor = GetHitCreature(); if (!lothalor || lothalor->GetEntry() != NPC_LOTHALOR) return; lothalor->AI()->Talk(SAY_LOTHALOR); lothalor->RemoveAura(SPELL_CONFUSED); lothalor->DespawnOrUnsummon(4000); } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_q12096_q12092_bark_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const override { return new spell_q12096_q12092_bark_SpellScript(); } }; /*###### ## Quest: Defending Wyrmrest Temple ID: 12372 ######*/ enum WyrmDefenderEnum { // Quest data QUEST_DEFENDING_WYRMREST_TEMPLE = 12372, GOSSIP_TEXTID_DEF1 = 12899, // Gossip data GOSSIP_TEXTID_DEF2 = 12900, // Spells data SPELL_CHARACTER_SCRIPT = 49213, SPELL_DEFENDER_ON_LOW_HEALTH_EMOTE = 52421, // ID - 52421 Wyrmrest Defender: On Low Health Boss Emote to Controller - Random /self/ SPELL_RENEW = 49263, // cast to heal drakes SPELL_WYRMREST_DEFENDER_MOUNT = 49256, // Texts data WHISPER_MOUNTED = 0, BOSS_EMOTE_ON_LOW_HEALTH = 2 }; #define GOSSIP_ITEM_1 "We need to get into the fight. Are you ready?" 
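// Quest flow, summarizing the script below: while quest 12372 is incomplete
// the defender offers GOSSIP_ITEM_1; selecting it casts
// SPELL_CHARACTER_SCRIPT on the player (which presumably ends up mounting
// them on the drake), and SpellHit() then reacts to
// SPELL_WYRMREST_DEFENDER_MOUNT by whispering the rider and making the
// defender attackable. The low-health boss emote is throttled: it only
// re-arms 20 seconds after a SPELL_RENEW heal is seen.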
class npc_wyrmrest_defender : public CreatureScript { public: npc_wyrmrest_defender() : CreatureScript("npc_wyrmrest_defender") { } bool OnGossipHello(Player* player, Creature* creature) override { if (player->GetQuestStatus(QUEST_DEFENDING_WYRMREST_TEMPLE) == QUEST_STATUS_INCOMPLETE) { player->ADD_GOSSIP_ITEM(GOSSIP_ICON_CHAT, GOSSIP_ITEM_1, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF+1); player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_DEF1, creature->GetGUID()); } else player->SEND_GOSSIP_MENU(player->GetGossipTextId(creature), creature->GetGUID()); return true; } bool OnGossipSelect(Player* player, Creature* creature, uint32 /*sender*/, uint32 action) override { player->PlayerTalkClass->ClearMenus(); if (action == GOSSIP_ACTION_INFO_DEF+1) { player->SEND_GOSSIP_MENU(GOSSIP_TEXTID_DEF2, creature->GetGUID()); // Makes player cast trigger spell for 49207 on self player->CastSpell(player, SPELL_CHARACTER_SCRIPT, true); // The gossip should not auto close } return true; } struct npc_wyrmrest_defenderAI : public VehicleAI { npc_wyrmrest_defenderAI(Creature* creature) : VehicleAI(creature) { Initialize(); } void Initialize() { hpWarningReady = true; renewRecoveryCanCheck = false; RenewRecoveryChecker = 0; } bool hpWarningReady; bool renewRecoveryCanCheck; uint32 RenewRecoveryChecker; void Reset() override { Initialize(); } void UpdateAI(uint32 diff) override { // Check system for Health Warning should happen first time whenever get under 30%, // after it should be able to happen only after recovery of last renew is fully done (20 sec), // next one used won't interfere if (hpWarningReady && me->GetHealthPct() <= 30.0f) { me->CastSpell(me, SPELL_DEFENDER_ON_LOW_HEALTH_EMOTE); hpWarningReady = false; } if (renewRecoveryCanCheck) { if (RenewRecoveryChecker <= diff) { renewRecoveryCanCheck = false; hpWarningReady = true; } else RenewRecoveryChecker -= diff; } } void SpellHit(Unit* /*caster*/, SpellInfo const* spell) override { switch (spell->Id) { case SPELL_WYRMREST_DEFENDER_MOUNT: Talk(WHISPER_MOUNTED, me->GetCharmerOrOwner()); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_IMMUNE_TO_PC | UNIT_FLAG_IMMUNE_TO_NPC); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_PVP_ATTACKABLE); break; // Both below are for checking low hp warning case SPELL_DEFENDER_ON_LOW_HEALTH_EMOTE: Talk(BOSS_EMOTE_ON_LOW_HEALTH, me->GetCharmerOrOwner()); break; case SPELL_RENEW: if (!hpWarningReady && RenewRecoveryChecker <= 100) { RenewRecoveryChecker = 20000; } renewRecoveryCanCheck = true; break; } } }; CreatureAI* GetAI(Creature* creature) const override { return new npc_wyrmrest_defenderAI(creature); } }; /*##### # npc_torturer_lecraft #####*/ enum TorturerLeCraft { SPELL_HEMORRHAGE = 30478, SPELL_KIDNEY_SHOT = 30621, SPELL_HIGH_EXECUTORS_BRANDING_IRON = 48603, NPC_TORTURER_LECRAFT = 27394, EVENT_HEMORRHAGE = 1, EVENT_KIDNEY_SHOT = 2, SAY_AGGRO = 0 }; class npc_torturer_lecraft : public CreatureScript { public: npc_torturer_lecraft() : CreatureScript("npc_torturer_lecraft") {} struct npc_torturer_lecraftAI : public ScriptedAI { npc_torturer_lecraftAI(Creature* creature) : ScriptedAI(creature) { _textCounter = 1; } void Reset() override { _textCounter = 1; _playerGUID.Clear(); } void EnterCombat(Unit* who) override { _events.ScheduleEvent(EVENT_HEMORRHAGE, urand(5000, 8000)); _events.ScheduleEvent(EVENT_KIDNEY_SHOT, urand(12000, 15000)); if (Player* player = who->ToPlayer()) Talk (SAY_AGGRO, player); } void SpellHit(Unit* caster, const SpellInfo* spell) override { if (spell->Id != SPELL_HIGH_EXECUTORS_BRANDING_IRON) return; if (Player* player 
= caster->ToPlayer()) { if (_textCounter == 1) _playerGUID = player->GetGUID(); if (_playerGUID != player->GetGUID()) return; Talk(_textCounter, player); if (_textCounter == 5) player->KilledMonsterCredit(NPC_TORTURER_LECRAFT); ++_textCounter; if (_textCounter == 13) _textCounter = 6; } } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; _events.Update(diff); while (uint32 eventId = _events.ExecuteEvent()) { switch (eventId) { case EVENT_HEMORRHAGE: DoCastVictim(SPELL_HEMORRHAGE); _events.ScheduleEvent(EVENT_HEMORRHAGE, urand(12000, 168000)); break; case EVENT_KIDNEY_SHOT: DoCastVictim(SPELL_KIDNEY_SHOT); _events.ScheduleEvent(EVENT_KIDNEY_SHOT, urand(20000, 26000)); break; default: break; } } DoMeleeAttackIfReady(); } private: EventMap _events; uint8 _textCounter; ObjectGuid _playerGUID; }; CreatureAI* GetAI(Creature* creature) const override { return new npc_torturer_lecraftAI(creature); } }; void AddSC_dragonblight() { new npc_commander_eligor_dawnbringer(); new spell_q12096_q12092_dummy(); new spell_q12096_q12092_bark(); new npc_wyrmrest_defender(); new npc_torturer_lecraft(); }
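/* Registration note (general TrinityCore convention rather than anything
 * specific to this file): constructing a CreatureScript/SpellScriptLoader
 * subclass, as AddSC_dragonblight() does above, registers it with the
 * ScriptMgr under the name passed to the base-class constructor, which has
 * to match the ScriptName assigned to the creature or spell in the world
 * database.
 */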
gpl-2.0